# encoding: utf-8
"""
setup.py
Created by Thomas Mangin on 2014-12-23.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.configuration.environment import environment
_SPACE = {
'space': ' '*33
}
HELP_STDOUT = """\
where the logs should be sent
%(space)s syslog (or no setting) sends the data to the local syslog daemon
%(space)s host:<location> sends the data to a remote syslog server
%(space)s stdout sends the data to stdout
%(space)s stderr sends the data to stderr
%(space)s <filename> sends the data to a file""" % _SPACE
environment.application = 'exabgp'
environment.configuration = {
'profile': {
'enable': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'toggle profiling of the code',
},
'file': {
'read': environment.unquote,
'write': environment.quote,
'value': '',
'help': 'profiling result file, none means stdout, no overwriting',
},
},
'pdb': {
'enable': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'on program fault, start pdb, the Python interactive debugger',
}
},
'daemon': {
'pid': {
'read': environment.unquote,
'write': environment.quote,
'value': '',
'help': 'where to save the pid if we manage it',
},
'user': {
'read': environment.user,
'write': environment.quote,
'value': 'nobody',
'help': 'user to run as',
},
'daemonize': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'should we run in the background',
},
'drop': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'drop privileges before forking processes',
},
'umask': {
'read': environment.umask_read,
'write': environment.umask_write,
'value': '0137',
            'help': 'run the daemon with this umask; governs the permissions of log files etc.',
},
},
'log': {
'enable': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'enable logging',
},
'level': {
'read': environment.syslog_value,
'write': environment.syslog_name,
'value': 'INFO',
            'help': 'log messages with at least priority SYSLOG.<level>',
},
'destination': {
'read': environment.unquote,
'write': environment.quote,
'value': 'stdout',
'help': HELP_STDOUT,
},
'all': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report debug information for everything',
},
'configuration': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'report command parsing',
},
'reactor': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'report signal received, command reload',
},
'daemon': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'report pid change, forking, ...',
},
'processes': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'report handling of forked processes',
},
'network': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'report networking information (TCP/IP, network state,...)',
},
'packets': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report BGP packets sent and received',
},
'rib': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report change in locally configured routes',
},
'message': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report changes in route announcement on config reload',
},
'timers': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report keepalives timers',
},
'routes': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report received routes',
},
'parser': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'report BGP message parsing details',
},
'short': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'use short log format (not prepended with time, level, pid and source)',
},
},
'tcp': {
'once': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'only one TCP connection attempt per peer (for debugging scripts)',
},
'delay': {
'read': environment.integer,
'write': environment.nop,
'value': '0',
            'help': 'start announcing routes only when the minute of the hour is a multiple of this number',
},
'bind': {
'read': environment.optional_ip,
'write': environment.quote,
'value': '',
'help': 'IP to bind on when listening (no ip to disable)',
},
'port': {
'read': environment.integer,
'write': environment.nop,
'value': '179',
'help': 'port to bind on when listening',
},
'acl': {
'read': environment.boolean,
'write': environment.lower,
'value': '',
'help': '(experimental) unimplemented',
},
},
'bgp': {
'openwait': {
'read': environment.integer,
'write': environment.nop,
'value': '60',
            'help': 'how many seconds we wait for an OPEN message once the TCP session is established',
},
},
'cache': {
'attributes': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'cache all attributes (configuration and wire) for faster parsing',
},
'nexthops': {
'read': environment.boolean,
'write': environment.lower,
'value': 'true',
'help': 'cache routes next-hops (deprecated: next-hops are always cached)',
},
},
'api': {
'encoder': {
'read': environment.api,
'write': environment.lower,
'value': 'json',
            'help': '(experimental) default encoder to use with the external API (text or json)',
},
'compact': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'shorter JSON encoding for IPv4/IPv6 Unicast NLRI',
},
'respawn': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'should we respawn a helper process if it dies',
},
'file': {
'read': environment.unquote,
'write': environment.quote,
'value': '',
'help': 'where should we create a socket for remote control',
},
},
'reactor': {
'speed': {
'read': environment.real,
'write': environment.nop,
'value': '1.0',
'help': 'reactor loop time\n%(space)s use only if you understand the code.' % _SPACE,
},
},
# Here for internal use
'debug': {
'pdb': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'enable python debugger on errors',
},
'memory': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'command line option --memory',
},
'configuration': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'undocumented option: raise on configuration parsing errors',
},
'selfcheck': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
'help': 'does a self check on the configuration file',
},
'route': {
'read': environment.unquote,
'write': environment.quote,
'value': '',
'help': 'decode the route using the configuration',
},
'defensive': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'deliberately generate random faults in the code',
},
'rotate': {
'read': environment.boolean,
'write': environment.lower,
'value': 'false',
            'help': 'rotate configuration files on reload (signal)',
},
},
}
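# Illustrative only: a minimal sketch of how one option from the table above
# could be resolved, assuming a flat 'exabgp.<section>.<name>' environment
# variable naming scheme. The helper below is hypothetical, not part of
# ExaBGP's API.
def _example_resolve (section, name):
    import os
    option = environment.configuration[section][name]
    # fall back to the declared default when no environment override exists
    raw = os.environ.get('exabgp.%s.%s' % (section, name), option['value'])
    # 'read' converts the raw string (e.g. environment.boolean -> bool)
    return option['read'](raw)
# e.g. _example_resolve('daemon', 'daemonize') -> False unless overridden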
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
		if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
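# Taken together, bufreverse + wordreverse convert between the word order
# getwork uses on the wire and the big-endian hex Bitcoin displays.
# Illustrative check on a little-endian host (not part of the original):
#   bufreverse('\x01\x02\x03\x04').encode('hex')  -> '04030201'
#   wordreverse('AAAABBBB')                       -> 'BBBBAAAA'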
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
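		# getwork 'data' is 128 bytes hex-encoded (256 chars); the nonce is
		# header bytes 76-79, i.e. hex chars 152-160, which the slice below
		# replaces with our candidate nonce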
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
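	# Example CONFIG-FILE contents for the key=value parser above
	# (illustrative values; rpcuser and rpcpass are required):
	#   host=127.0.0.1
	#   port=8332
	#   rpcuser=bitcoinrpc
	#   rpcpass=secret
	#   threads=2
	#   hashmeter=1
	#   scantime=30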
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 3651
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import refs_aggregate, Q
from django.utils import timezone
from django.utils.functional import cached_property
class CombinableMixin(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return Expression(other, connector, self)
return Expression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class BaseExpression(object):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def __init__(self, output_field=None):
self._output_field = output_field
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(ExpressionNode, self).as_sql(compiler, connection)
setattr(ExpressionNode, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
Returns: an ExpressionNode to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self):
"""
Hook used by Field.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
        Returns the output type of this expression.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
self._output_field = sources[0]
for source in sources:
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def refs_aggregate(self, existing_aggregates):
"""
Does this expression contain a reference to some of the
existing aggregates? If so, returns the aggregate and also
the lookup parts that *weren't* found. So, if
        existing_aggregates = {'max_id': Max('id')}
self.name = 'max_id'
queryset.filter(max_id__range=[10,100])
then this method will return Max('id') and those parts of the
name that weren't found. In this case `max_id` is found and the range
portion is returned as ('range',).
"""
for node in self.get_source_expressions():
agg, lookup = node.refs_aggregate(existing_aggregates)
if agg:
return agg, lookup
return False, ()
def refs_field(self, aggregate_types, field_types):
"""
Helper method for check_aggregate_support on backends
"""
return any(
node.refs_field(aggregate_types, field_types)
for node in self.get_source_expressions())
def prepare_database_save(self, field):
return self
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
def reverse_ordering(self):
return self
class ExpressionNode(BaseExpression, CombinableMixin):
"""
An expression that can be combined with other expressions.
"""
pass
class Expression(ExpressionNode):
def __init__(self, lhs, connector, rhs, output_field=None):
super(Expression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField')
or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(Expression):
    def compile(self, side, compiler, connection):
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                pass
            else:
                # 'output' is only bound when output_field resolved, so the
                # check must live in the else branch to avoid a NameError
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class F(CombinableMixin):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def refs_aggregate(self, existing_aggregates):
return refs_aggregate(self.name.split(LOOKUP_SEP), existing_aggregates)
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
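# Illustrative F() usage (model and field names are hypothetical):
#   from django.db.models import F
#   Employee.objects.filter(salary__gt=F('bonus') * 2)
#   Product.objects.update(price=F('price') * 1.1)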
class Func(ExpressionNode):
"""
A SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
def __init__(self, *expressions, **extra):
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else F(arg)
for arg in expressions
]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None):
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
if function is None:
self.extra['function'] = self.extra.get('function', self.function)
else:
self.extra['function'] = function
self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
template = template or self.extra.get('template', self.template)
return template % self.extra, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
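# Illustrative Func usage (queryset and field name are hypothetical):
#   from django.db.models import F, Func
#   queryset.annotate(name_lower=Func(F('name'), function='LOWER'))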
class Value(ExpressionNode):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def as_sql(self, compiler, connection):
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
if (connection.features.has_native_duration_field and
connection.features.driver_supports_timedelta_args):
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(ExpressionNode):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super(RawSQL, self).__init__(output_field=output_field)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Random(ExpressionNode):
def __init__(self):
super(Random, self).__init__(output_field=fields.FloatField())
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(ExpressionNode):
def __init__(self, alias, target, source=None):
if source is None:
source = target
super(Col, self).__init__(output_field=source)
self.alias, self.target = alias, target
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
class Ref(ExpressionNode):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.source = source
self.refs = refs
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class When(ExpressionNode):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=Value(None), **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super(When, self).__init__(output_field=None)
self.condition = condition
self.result = self._parse_expression(then)
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def _parse_expression(self, expression):
return expression if hasattr(expression, 'resolve_expression') else F(expression)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None):
template_params = {}
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(ExpressionNode):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', Value(None))
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = default if hasattr(default, 'resolve_expression') else F(default)
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, extra=None):
if not self.cases:
return compiler.compile(self.default)
template_params = dict(extra) if extra else {}
case_parts = []
sql_params = []
for case in self.cases:
case_sql, case_params = compiler.compile(case)
case_parts.append(case_sql)
sql_params.extend(case_params)
template_params['cases'] = self.case_joiner.join(case_parts)
default_sql, default_params = compiler.compile(self.default)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or self.template
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
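# Illustrative Case/When usage (model and field names are hypothetical):
#   from django.db.models import Case, CharField, Value, When
#   Client.objects.annotate(
#       discount=Case(
#           When(account_type='gold', then=Value('5%')),
#           When(account_type='platinum', then=Value('10%')),
#           default=Value('0%'),
#           output_field=CharField(),
#       ),
#   )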
class Date(ExpressionNode):
"""
Add a date selection column.
"""
def __init__(self, lookup, lookup_type):
super(Date, self).__init__(output_field=fields.DateField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, fields.DateTimeField), (
"%r is a DateTimeField, not a DateField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.date_trunc_sql(self.lookup_type, sql), []
def copy(self):
copy = super(Date, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
return copy
def convert_value(self, value, connection, context):
if isinstance(value, datetime.datetime):
value = value.date()
return value
class DateTime(ExpressionNode):
"""
Add a datetime selection column.
"""
def __init__(self, lookup, lookup_type, tzinfo):
super(DateTime, self).__init__(output_field=fields.DateTimeField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
if tzinfo is None:
self.tzname = None
else:
self.tzname = timezone._get_timezone_name(tzinfo)
self.tzinfo = tzinfo
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateTimeField), (
"%r isn't a DateTimeField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)
def copy(self):
copy = super(DateTime, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
copy.tzname = self.tzname
return copy
def convert_value(self, value, connection, context):
if settings.USE_TZ:
if value is None:
raise ValueError(
"Database returned an invalid value in QuerySet.datetimes(). "
"Are time zone definitions for your database and pytz installed?"
)
value = value.replace(tzinfo=None)
value = timezone.make_aware(value, self.tzinfo)
return value
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
descending_template = 'DESC'
ascending_template = 'ASC'
def __init__(self, expression, descending=False):
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
expression_sql, params = compiler.compile(self.expression)
placeholders = {'expression': expression_sql}
placeholders['ordering'] = 'DESC' if self.descending else 'ASC'
return (self.template % placeholders).rstrip(), params
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
#-*- coding: utf-8 -*-
""" EOSS catalog system
Reads data provided by USGS directly via http or csv files (from http://landsat.usgs.gov/metadatalist.php) and creates Catalog_Dataset objects
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
from api.eoss_api import Api
from manage.landsat_catalog import USGS_HTTP_SERVICE, USGSCatalog
from model.plain_models import USGSOrderContainer, GoogleLandsatContainer, S3PublicContainer, \
Catalog_Dataset
import datetime
from utilities.web_utils import remote_file_exists
import dateutil.parser  # the parser submodule must be imported explicitly
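# Column indices used by the harvesters below, inferred from the
# assignments in this module (the exact USGS metadata CSV layout is an
# assumption):
#    0: entity id            1: sensor            5: quicklook URL
#    6: path                 7: row              19: cloud cover
#   24: day/night flag      29: acquisition time
#   52: spacecraft id       53: data level       54: order/download URL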
def landsat_harvester(in_csv):
datasets = []
header = False
with open(in_csv, 'r') as f:
for counter, line in enumerate(f):
            if not header:  # first line is the CSV header
header = True
else:
content_list = line.split(',')
if (content_list[52] != 'LANDSAT_4'):
dataset = Catalog_Dataset()
dataset.entity_id = content_list[0]
dataset.sensor = content_list[1]
dataset.tile_identifier = '%03d%03d' % (int(content_list[6]), int(content_list[7]))
dataset.clouds = float(content_list[19])
dataset.daynight = str.lower(content_list[24])
                    if dataset.sensor == "LANDSAT_TM":
                        # 'import datetime' binds the module, so the class must be qualified
                        dataset.acq_time = datetime.datetime.strptime(content_list[29][:-6], '%Y:%j:%H:%M:%S')
                    else:
                        dataset.acq_time = datetime.datetime.strptime(content_list[29][:-8], '%Y:%j:%H:%M:%S')
dataset.level = content_list[53]
container = dict()
container['quicklook'] = content_list[5]
sensor = dataset.sensor
if sensor == "OLI_TIRS":
sensor = "LANDSAT_8"
container['metadata'] = USGS_HTTP_SERVICE % (
int(content_list[6]), int(content_list[6]), int(content_list[7]), int(content_list[7]), sensor,
dataset.acq_time.strftime("%Y-%m-%d"), dataset.acq_time.strftime("%Y-%m-%d"))
usgs = USGSOrderContainer()
usgs.link = content_list[54]
container.update(usgs.to_dict())
google = GoogleLandsatContainer()
google_sensors = {'OLI_TIRS': 'L8', 'LANDSAT_ETM_SLC_OFF': 'L7', 'LANDSAT_ETM': 'L7',
'LANDSAT_TM': 'L5', 'TIRS': 'L8', 'OLI': 'L8'}
google_link = google.base % (
google_sensors[content_list[1]], int(content_list[6]), int(content_list[7]), content_list[0])
if remote_file_exists(google_link):
google.link = google_link
container.update(google.to_dict())
dataset.resources = container
datasets.append(dataset)
else:
print 'skipping...'
if (counter % 25000) == 0:
print counter
return datasets
def landsat_harvester_line(lines):
datasets = []
header = False
for counter, line in enumerate(lines):
content_list = line.split(',')
if (content_list[52] != 'LANDSAT_4'):
dataset = Catalog_Dataset()
dataset.entity_id = content_list[0]
dataset.sensor = content_list[1]
dataset.tile_identifier = '%03d%03d' % (int(content_list[6]), int(content_list[7]))
dataset.clouds = float(content_list[19])
dataset.daynight = str.lower(content_list[24])
if dataset.sensor == "LANDSAT_TM":
dataset.acq_time = datetime.datetime.strptime(content_list[29][:-6], '%Y:%j:%H:%M:%S')
else:
dataset.acq_time = datetime.datetime.strptime(content_list[29][:-8], '%Y:%j:%H:%M:%S')
dataset.level = content_list[53]
container = dict()
container['quicklook'] = content_list[5]
sensor = dataset.sensor
if sensor == "OLI_TIRS":
sensor = "LANDSAT_8"
container['metadata'] = USGS_HTTP_SERVICE % (
int(content_list[6]), int(content_list[6]), int(content_list[7]), int(content_list[7]), sensor,
dataset.acq_time.strftime("%Y-%m-%d"), dataset.acq_time.strftime("%Y-%m-%d"))
usgs = USGSOrderContainer()
usgs.link = content_list[54]
container.update(usgs.to_dict())
google = GoogleLandsatContainer()
google_link = GoogleLandsatContainer.base % (
GoogleLandsatContainer.supported_sensors[content_list[1]], int(content_list[6]), int(content_list[7]), content_list[0])
if remote_file_exists(google_link):
google.link = google_link
container.update(google.to_dict())
dataset.resources = container
datasets.append(dataset)
if (counter % 25000) == 0:
print counter
return datasets
def import_from_file_ls(in_csv):
datasets = landsat_harvester(in_csv)
api = Api()
skipped = list()
registered = list()
for c, ds in enumerate(datasets):
try:
out = api.create_dataset(ds)
            if 'title' not in str(out):
registered.append(c)
else:
skipped.append(c)
except Exception, e:
print e
if c % 100 == 0:
print c
print 'skipped:', skipped
print 'registered:', registered
skipped = list()
registered = list()
def import_from_pipe_ls(lines):
datasets = landsat_harvester_line(lines)
api = Api()
skipped = list()
registered = list()
for c, ds in enumerate(datasets):
try:
out = api.create_dataset(ds)
            if 'already' not in str(out):
registered.append(c)
else:
skipped.append(c)
except Exception, e:
print e
print 'registered:', registered
print 'skipped:', skipped
def import_from_landsat_catalog(sensor,start_date, api_url):
api = Api(api_url)
max_cloud_ratio = 1.0
ag_season_start = dateutil.parser.parse(start_date)
ag_season_end = ag_season_start + datetime.timedelta(days=1)
aoi_se = (180, -90)
aoi_nw = (-180, 90)
aoi_ne = (aoi_se[0], aoi_nw[1])
aoi_sw = (aoi_nw[0], aoi_se[1])
aoi = [aoi_nw, aoi_ne, aoi_se, aoi_sw, aoi_nw]
cat = USGSCatalog()
# "LANDSAT_8", "LANDSAT_ETM_SLC_OFF", "LANDSAT_ETM"
datasets = cat.find(sensor, aoi, ag_season_start, ag_season_end, max_cloud_ratio)
    if datasets is not None:
ds_found = list()
ds_missing = list()
for counter, ds in enumerate(datasets):
catalog_ds = api.get_dataset(ds.entity_id)
if catalog_ds is None or len(catalog_ds) == 0:
ds_missing.append(ds)
elif len(catalog_ds) == 1:
ds_found.append(catalog_ds)
else:
print 'More in catalog found: %s (%d)' % (ds.entity_id, len(catalog_ds))
if (counter % 25) == 0:
print counter, len(datasets)
print 'already registered: ', len(ds_found), len(datasets)
print 'missing: ', len(ds_missing), len(datasets)
for counter, ds_obj in enumerate(ds_missing):
new_ds = api.create_dataset(ds_obj)
            if new_ds is not None:
print new_ds
if (counter % 25) == 0:
print counter, len(ds_missing)
else:
        print 'No data found in catalog for Landsat from %s to %s' % (
ag_season_start.strftime("%Y-%m-%d"), ag_season_end.strftime("%Y-%m-%d"))
import pygame, sys, PR
from pygame.locals import *
from npc import *
#CONSTANTS
WIN_WIDTH = 600
WIN_HEADER = 40
WIN_HEIGHT = WIN_WIDTH+WIN_HEADER
GAME_WIDTH = WIN_WIDTH
GAME_HEIGHT = WIN_HEIGHT - WIN_HEADER
GAME_BORDER = 5
BLACK = (0,0,0)
WHITE = (255, 255, 255)
BOSS_GREEN = (0, 200, 122)
RED = (255, 0, 0)
MAX_FRAMES = 1500
DISPLAY = True
DISPLAY_SURF = None
BOSS_VX = 6
BULLET_VY = 12
class bullet():
def __init__(self, init_x, init_y, init_vy):
#hardcoded bullet dimensions
self.width = 5
self.height = 10
if DISPLAY:
self.shape = pygame.Surface((self.width, self.height)).convert()
self.Rect = PR.Rect((init_x, init_y), (init_x+self.width, init_y+self.height))
self.vy = init_vy
self.vx = 0
self.x = init_x
self.y = init_y
def update(self):
self.y+=self.vy
self.Rect = PR.Rect((self.x, self.y), (self.x+self.width, self.y+self.height))
def checkInBounds(self):
return self.y > WIN_HEADER and self.y < WIN_HEIGHT
def draw(self, DS):
self.shape.fill(WHITE)
DS.blit(self.shape, (self.x, self.y))
class npcChar():
def __init__(self, player):
self.x = player.x
self.y = player.y
self.brain = player
if DISPLAY:
self.shape = pygame.Surface((player.width, player.height)).convert()
self.Rect = PR.Rect((self.x, self.y), (self.x+player.width, self.y+player.height))
self.health = 100
def update(self):
self.brain.update()
self.x = self.brain.x
self.Rect = PR.Rect((self.x, self.y), (self.x+self.brain.width, self.y+self.brain.height))
def shoot(self, bulletlist):
if self.brain.okToShoot():
bulletlist.append( bullet( self.x+self.brain.width/2, self.brain.y, -1*BULLET_VY ) )
    def checkCollisions(self, bulletlist):
        # iterate over a copy: removing from the list while iterating it
        # directly would skip the element after each hit
        for bullet in bulletlist[:]:
            #if bullet.x >= self.x and bullet.x <= self.x+self.brain.width and bullet.y+bullet.height >= self.y+self.brain.height:
            if self.Rect.overlaps(bullet.Rect):
                self.health -= 10
                bulletlist.remove(bullet)
def draw(self, DS):
self.shape.fill(RED)
DS.blit(self.shape, (self.x, self.y))
class BossChar():
def __init__(self, boss_width, boss_height):
self.width = boss_width
self.height = boss_height
self.x = (WIN_WIDTH - self.width)/2
self.y = WIN_HEADER+GAME_BORDER
if DISPLAY:
self.shape = pygame.Surface((self.width, self.height)).convert()
self.Rect = PR.Rect((self.x, self.y), (self.x+self.width, self.y+self.height))
self.health = 100
    def checkCollisions(self, bulletlist):
        # iterate over a copy so removal doesn't skip the next bullet
        for bullet in bulletlist[:]:
            #if bullet.x >= self.x and bullet.x <= self.x+self.width and bullet.y <= self.y+self.height:
            if self.Rect.overlaps(bullet.Rect):
                self.health -= 10
                bulletlist.remove(bullet)
def update(self, movement):
#2 cases for input
if(movement > 0.5):
vx = BOSS_VX
else:
vx = -1 * BOSS_VX
new_pos = self.x + vx
if new_pos < GAME_BORDER:
self.x = GAME_BORDER
elif new_pos > WIN_WIDTH - GAME_BORDER - self.width:
self.x = WIN_WIDTH - GAME_BORDER - self.width
else:
self.x = new_pos
        self.Rect = PR.Rect((self.x, self.y), (self.x+self.width, self.y+self.height))
#1 case moves it to the left
#other case moves it to the right
def shoot(self, trigger, bulletlist):
if len(bulletlist) < 5 and trigger:
bulletlist.append(bullet(self.x+self.width/2, self.y+self.height, BULLET_VY))
def draw(self, DS):
self.shape.fill(BOSS_GREEN)
DS.blit(self.shape, (self.x, self.y))
def drawBackground(DISPLAY_SURF):
DISPLAY_SURF.fill(BLACK)
pygame.draw.rect(DISPLAY_SURF, WHITE, pygame.Rect(0, WIN_HEADER, GAME_WIDTH, GAME_HEIGHT), GAME_BORDER)
def checkBullets(bulletlist):
    # iterate over a copy so removal doesn't skip the next bullet
    for bullet in bulletlist[:]:
        if bullet.checkInBounds():
            if DISPLAY:
                bullet.draw(DISPLAY_SURF)
            bullet.update()
        else:
            bulletlist.remove(bullet)
def game(doDisplay, player, network):
frame_counter = 0
global DISPLAY
global DISPLAY_SURF
DISPLAY = doDisplay
if DISPLAY:
pygame.init()
DISPLAY_SURF = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption("boss simulator")
FPS = 60
FPSCLOCK = pygame.time.Clock()
npc = npcChar(player)
npc_bulletlist=[]
boss = BossChar(40, 40)
boss_bulletlist=[]
score = 0
#GameLoop
while frame_counter < MAX_FRAMES and boss.health > 0 and npc.health > 0:
"""
send inputs to neural network
"""
        # network outputs: [move, shoot]
inputs = [(npc.x*1.0 - boss.x*1.0)/WIN_WIDTH]
if len( npc_bulletlist ) > 0:
closest_bullet_pos = npc_bulletlist[0].x
else:
closest_bullet_pos = -1
inputs.append(boss.x*1.0/WIN_WIDTH)
inputs.append(closest_bullet_pos*1.0/WIN_WIDTH)
inputs.append(boss.health*1.0/100)
inputs.append(1)#bias
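        # assembled input vector: [npc-boss x offset, boss x, nearest npc
        # bullet x (or -1 when none), boss health], each normalised,
        # plus a constant bias term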
output = network.processNetwork(inputs)
"""
use output of neural network to move
"""
npc.update()
boss.update(output[0])
npc.shoot(npc_bulletlist)
boss.shoot(output[1] > 0.5, boss_bulletlist)
#check bullet collisions
boss.checkCollisions(npc_bulletlist)
npc.checkCollisions(boss_bulletlist)
if DISPLAY:
#update screen
drawBackground(DISPLAY_SURF)
npc.draw(DISPLAY_SURF)
boss.draw(DISPLAY_SURF)
#draw the score
scoreboard(DISPLAY_SURF, boss.health, npc.health)
checkBullets(npc_bulletlist)
checkBullets(boss_bulletlist)
if DISPLAY:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
return
pygame.display.update()
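        # fitness (inferred from the returns below): positive when the boss
        # kills the npc, negative when the boss dies, with faster outcomes
        # scoring larger magnitudes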
if npc.health <= 0:
if DISPLAY:
pygame.quit()
return (MAX_FRAMES-frame_counter) + (100-boss.health)
if boss.health <= 0:
if DISPLAY:
pygame.quit()
return -1 * (MAX_FRAMES-frame_counter) + (100-npc.health)
frame_counter+=1
#return score
if DISPLAY:
pygame.quit()
return -1*MAX_FRAMES
def scoreboard(DISPLAY_SURF, bosshealth, npchealth):
bosshealth_text = pygame.font.Font('freesansbold.ttf', 40)
bosshealth_surf = bosshealth_text.render("BOSS: "+str(bosshealth), True, WHITE)
bosshealth_rect = bosshealth_surf.get_rect()
bosshealth_rect.topleft = (0,0)
npchealth_text = pygame.font.Font('freesansbold.ttf',40)
npchealth_surf = npchealth_text.render("NPC: "+str(npchealth), True, WHITE)
npchealth_rect = npchealth_surf.get_rect()
npchealth_rect.topright = (WIN_WIDTH, 0)
DISPLAY_SURF.blit(bosshealth_surf, bosshealth_rect)
DISPLAY_SURF.blit(npchealth_surf, npchealth_rect)
def main():
    #plays a round of the simulation without display (game() takes three arguments)
    print game(False, npc6(20, 20), network())
sys.exit()
if __name__ == "__main__":
main()
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for Scanner DAO."""
from datetime import datetime
from datetime import timedelta
import hashlib
import json
import os
import unittest
import unittest.mock as mock
from sqlalchemy.orm import sessionmaker
from tests.services.scanner import scanner_base_db
from tests.services.scanner.test_data import config_validator_violations
from tests.services.util.db import create_test_engine_with_file
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util.index_state import IndexState
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: disable=bad-indentation
class ScannerDaoTest(scanner_base_db.ScannerBaseDbTestCase):
"""Test scanner data access."""
def setUp(self):
"""Setup method."""
super(ScannerDaoTest, self).setUp()
self.maxDiff = None
# Used in hashing tests.
self.test_violation_full_name = ''
self.test_inventory_data = ''
self.test_violation_data = {}
self.test_rule_name = ''
def test_save_violations(self):
"""Test violations can be saved."""
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
saved_violations = self.violation_access.list(
scanner_index_id=scanner_index_id)
expected_hash_values = [(
'f3eb2be2ed015563d7dc4d4aea798a0b269b76ea590a7672b43113428d48da943'
'f2f5a1b44ad7850aa266add296cc548df88a12a30ba307519af9b314e6eaef8'
), (
'73f4a4ac87a76a2e9d2c7854ac8fa0774fe5192b2801e9a5026fc39854bc6a49a'
'a0043b4afbcb8b1f0277da5a88e60766c35f7e9a775e9568106919de581a2cc'
)]
keys = ['scanner_index_id', 'resource_id', 'full_name',
'resource_type', 'rule_name', 'rule_index', 'violation_type',
'violation_data', 'violation_hash', 'resource_data',
'created_at_datetime']
for fake, saved in zip(scanner_base_db.FAKE_VIOLATIONS,
saved_violations):
for key in keys:
if key == 'scanner_index_id':
expected_key_value = fake.get(key, scanner_index_id)
else:
expected_key_value = fake.get(key)
saved_key_value = getattr(saved, key)
if key == 'violation_data':
self.assertEqual(
expected_key_value,
json.loads(saved_key_value),
'The key value of "%s" differs:\nExpected: %s'
'\nFound: %s' % (key, expected_key_value,
saved_key_value)
)
elif key == 'violation_hash':
self.assertIn(
saved_key_value, expected_hash_values,
'The key value of "%s" differs:\nExpected one of: %s'
'\nFound: %s' % (key, ',\n'.join(expected_hash_values),
saved_key_value)
)
elif key == 'created_at_datetime':
self.assertIsInstance(
saved_key_value, datetime,
'The key value of "%s" differs:\n Expected type: %s'
'\nFound type: %s' % (key, type(datetime),
type(saved_key_value))
)
else:
self.assertEqual(
expected_key_value, saved_key_value,
'The key value of "%s" differs:\nExpected: %s'
'\nFound: %s' % (key, expected_key_value,
saved_key_value)
)
@mock.patch.object(scanner_dao, '_create_violation_hash')
def test_convert_sqlalchemy_object_to_dict(self, mock_violation_hash):
mock_violation_hash.side_effect = [
scanner_base_db.FAKE_VIOLATION_HASH,
scanner_base_db.FAKE_VIOLATION_HASH
]
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
saved_violations = self.violation_access.list(
scanner_index_id=scanner_index_id)
converted_violations_as_dict = []
for violation in saved_violations:
converted_violations_as_dict.append(
scanner_dao.convert_sqlalchemy_object_to_dict(violation))
expected_violations_as_dict = [
{'full_name': u'full_name_111',
'id': 1,
'resource_name': 'fw-tag-match_111',
'resource_data': u'inventory_data_111',
'scanner_index_id': scanner_index_id,
'resource_id': u'fake_firewall_111',
'resource_type': u'firewall_rule',
'violation_message': u'',
'rule_index': 111,
'rule_name': u'disallow_all_ports_111',
'violation_data': (
u'{"policy_names": ["fw-tag-match_111"], '
'"recommended_actions": {"DELETE_FIREWALL_RULES": '
'["fw-tag-match_111"]}}'),
'violation_type': u'FIREWALL_BLACKLIST_VIOLATION_111',
'violation_hash': scanner_base_db.FAKE_VIOLATION_HASH,
},
{'full_name': u'full_name_222',
'id': 2,
'resource_name': 'fw-tag-match_222',
'resource_data': u'inventory_data_222',
'scanner_index_id': scanner_index_id,
'resource_id': u'fake_firewall_222',
'resource_type': u'firewall_rule',
'violation_message': u'',
'rule_index': 222,
'rule_name': u'disallow_all_ports_222',
'violation_data': (
u'{"policy_names": ["fw-tag-match_222"], '
'"recommended_actions": {"DELETE_FIREWALL_RULES": '
'["fw-tag-match_222"]}}'),
'violation_type': u'FIREWALL_BLACKLIST_VIOLATION_222',
'violation_hash': scanner_base_db.FAKE_VIOLATION_HASH,
}
]
# It's useless testing 'created_at_datetime' as we can't mock datetime
# and we only care about its type and not its value.
for violation in converted_violations_as_dict:
del violation['created_at_datetime']
self.assertEqual(expected_violations_as_dict,
converted_violations_as_dict)
self.assertEqual(mock_violation_hash.call_count,
len(scanner_base_db.FAKE_VIOLATIONS))
def test_create_violation_hash_with_default_algorithm(self):
"""Test _create_violation_hash."""
test_hash = hashlib.new('sha512')
test_hash.update(
json.dumps(self.test_violation_full_name).encode() +
json.dumps(self.test_inventory_data).encode() +
json.dumps(self.test_violation_data).encode() +
json.dumps(self.test_rule_name).encode()
)
expected_hash = test_hash.hexdigest()
returned_hash = scanner_dao._create_violation_hash(
self.test_violation_full_name,
self.test_inventory_data,
self.test_violation_data,
self.test_rule_name)
self.assertEqual(expected_hash, returned_hash)
@mock.patch.object(hashlib, 'new')
def test_create_violation_hash_with_invalid_algorithm(self, mock_hashlib):
"""Test _create_violation_hash with an invalid algorithm."""
mock_hashlib.side_effect = ValueError
returned_hash = scanner_dao._create_violation_hash(
self.test_violation_full_name,
self.test_inventory_data,
self.test_violation_data,
self.test_rule_name)
self.assertEqual('', returned_hash)
@mock.patch.object(json, 'dumps')
def test_create_violation_hash_invalid_violation_data(self, mock_json):
"""Test _create_violation_hash returns '' when it can't hash."""
expected_hash = ''
        # Mock json.dumps to raise an error, e.g. on invalid violation_data.
mock_json.side_effect = TypeError()
returned_hash = scanner_dao._create_violation_hash(
self.test_violation_full_name,
self.test_inventory_data,
self.test_violation_data,
self.test_rule_name)
self.assertEqual(expected_hash, returned_hash)
def test_create_violation_hash_with_inventory_data_not_string(self):
expected_hash = ('8280128e6ab64d38fcedd1554c5fef1b1adfa15314a45a98180e'
'a69dd7bcb0c53beb4e37ab089865fafe3b30fd33b41f6fbdf844'
'9d02781fc79a77f551c8cc05')
returned_hash = scanner_dao._create_violation_hash(
self.test_violation_full_name,
['aaa', 'bbb', 'ccc'],
self.test_violation_data,
self.test_rule_name)
self.assertEqual(expected_hash, returned_hash)
def test_create_violation_hash_with_full_name_not_string(self):
expected_hash = ('d21284c534c43adf89d9eb15a04ffc8431fa2410f9297ffc63d5'
'9392ce8532c86cc76c0c91a693b7f2c5271dac6a3a5713eddd29'
'fb2e71820c3755c8c29d772b')
returned_hash = scanner_dao._create_violation_hash(
None,
self.test_inventory_data,
self.test_violation_data,
self.test_rule_name)
self.assertEqual(expected_hash, returned_hash)
def test_get_latest_scanner_index_id_with_empty_table(self):
"""The method under test returns `None` if the table is empty."""
self.assertIsNone(
scanner_dao.get_latest_scanner_index_id(self.session, 123))
@mock.patch.object(date_time, 'get_utc_now_datetime')
def test_get_latest_scanner_index_id(self, mock_date_time):
"""The method under test returns the newest `ScannerIndex` row."""
time1 = datetime.utcnow()
time2 = time1 + timedelta(minutes=5)
time3 = time1 + timedelta(minutes=7)
mock_date_time.side_effect = [time1, time2, time3]
expected_id = date_time.get_utc_now_microtimestamp(time2)
self.session.add(scanner_dao.ScannerIndex.create(expected_id))
index2 = scanner_dao.ScannerIndex.create(expected_id)
index2.scanner_status = IndexState.SUCCESS
self.session.add(index2)
self.session.add(scanner_dao.ScannerIndex.create(expected_id))
self.session.flush()
self.assertEqual(
expected_id, scanner_dao.get_latest_scanner_index_id(
self.session, expected_id))
@mock.patch.object(date_time, 'get_utc_now_datetime')
def test_get_latest_scanner_index_id_with_failure_state(self,
mock_date_time):
"""The method under test returns the newest `ScannerIndex` row."""
time1 = datetime.utcnow()
time2 = time1 + timedelta(minutes=5)
time3 = time1 + timedelta(minutes=7)
mock_date_time.side_effect = [time1, time2, time3]
expected_id = date_time.get_utc_now_microtimestamp(time1)
index1 = scanner_dao.ScannerIndex.create(expected_id)
index1.scanner_status = IndexState.FAILURE
self.session.add(index1)
self.session.add(scanner_dao.ScannerIndex.create(expected_id))
self.session.add(scanner_dao.ScannerIndex.create(expected_id))
self.session.flush()
self.assertEqual(
expected_id,
scanner_dao.get_latest_scanner_index_id(
self.session, expected_id, IndexState.FAILURE))
@staticmethod
def test_map_by_resource_returns_cv_violations():
resource_map = scanner_dao.map_by_resource(
config_validator_violations.CONFIG_VALIDATOR_VIOLATIONS)
assert len(resource_map['config_validator_violations']) == 2
class ScannerIndexTest(ForsetiTestCase):
"""Test scanner data access."""
def setUp(self):
"""Setup method."""
ForsetiTestCase.setUp(self)
self.engine, self.dbfile = create_test_engine_with_file()
scanner_dao.ScannerIndex.__table__.create(bind=self.engine)
session_maker = sessionmaker()
self.session = session_maker(bind=self.engine)
def tearDown(self):
"""Teardown method."""
os.unlink(self.dbfile)
ForsetiTestCase.tearDown(self)
@mock.patch.object(date_time, 'get_utc_now_datetime')
def test_scanner_index_create(self, mock_date_time):
"""`ScannerIndex` create() works as expected."""
utc_now = datetime.utcnow()
mock_date_time.return_value = utc_now
expected_id = date_time.get_utc_now_microtimestamp(utc_now)
db_row = scanner_dao.ScannerIndex.create(expected_id)
self.assertEqual(expected_id, db_row.id)
self.assertEqual(utc_now, db_row.created_at_datetime)
self.assertEqual(IndexState.CREATED, db_row.scanner_status)
@mock.patch.object(date_time, 'get_utc_now_datetime')
def test_scanner_index_complete(self, mock_date_time):
"""`ScannerIndex` complete() works as expected."""
start = datetime.utcnow()
end = start + timedelta(minutes=5)
# ScannerIndex.create() calls get_utc_now_datetime() twice.
mock_date_time.side_effect = [start, end]
expected_id = date_time.get_utc_now_microtimestamp(start)
db_row = scanner_dao.ScannerIndex.create(expected_id)
self.assertEqual(expected_id, db_row.id)
db_row.complete()
self.assertEqual(end, db_row.completed_at_datetime)
self.assertEqual(IndexState.SUCCESS, db_row.scanner_status)
def test_scanner_index_add_warning(self):
"""`ScannerIndex` add_warning() works as expected."""
db_row = scanner_dao.ScannerIndex.create('aaa')
db_row.add_warning(self.session, '1st warning')
db_row.add_warning(self.session, '2nd warning')
self.assertEqual(
'1st warning\n2nd warning\n', db_row.scanner_index_warnings)
def test_scanner_index_set_error(self):
"""`ScannerIndex` set_error() works as expected."""
db_row = scanner_dao.ScannerIndex.create('aaa')
db_row.set_error(self.session, 'scanner error!')
self.assertEqual('scanner error!', db_row.scanner_index_errors)
class ViolationListTest(scanner_base_db.ScannerBaseDbTestCase):
"""Test the Violation.list() method."""
def test_list_without_indices(self):
self.populate_db(inv_index_id=self.inv_index_id1)
self.populate_db(inv_index_id=self.inv_index_id2)
actual_data = self.violation_access.list()
self.assertEqual(2 * len(scanner_base_db.FAKE_VIOLATIONS),
len(actual_data))
def test_list_with_inv_index_single_successful_scan(self):
self.populate_db(inv_index_id=self.inv_index_id1)
actual_data = self.violation_access.list(
inv_index_id=self.inv_index_id1)
self.assertEqual(len(scanner_base_db.FAKE_VIOLATIONS),
len(actual_data))
def test_list_with_inv_index_single_failed_scan(self):
self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
inv_index_id=self.inv_index_id1)
self.assertEqual(0, len(actual_data))
def test_list_with_inv_index_multi_mixed_success_scan(self):
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
inv_index_id=self.inv_index_id1)
self.assertEqual(len(scanner_base_db.FAKE_VIOLATIONS),
len(actual_data))
for violation in actual_data:
self.assertEqual(scanner_index_id, violation.scanner_index_id)
def test_list_with_inv_index_multi_all_success_scan(self):
self.populate_db(inv_index_id=self.inv_index_id2)
self.populate_db(inv_index_id=self.inv_index_id2)
actual_data = self.violation_access.list(
inv_index_id=self.inv_index_id2)
self.assertEqual(2 * len(scanner_base_db.FAKE_VIOLATIONS),
len(actual_data))
def test_list_with_inv_index_multi_all_failed_scan(self):
self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
inv_index_id=self.inv_index_id1)
self.assertEqual(0, len(actual_data))
def test_list_with_scnr_index_single_successful_scan(self):
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
actual_data = self.violation_access.list(
scanner_index_id=scanner_index_id)
self.assertEqual(len(scanner_base_db.FAKE_VIOLATIONS),
len(actual_data))
for violation in actual_data:
self.assertEqual(scanner_index_id, violation.scanner_index_id)
def test_list_with_scnr_index_single_failed_scan(self):
scanner_index_id = self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
scanner_index_id=scanner_index_id)
self.assertEqual(0, len(actual_data))
def test_list_with_scnr_index_multi_mixed_success_scan(self):
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
scanner_index_id=scanner_index_id)
        self.assertEqual(len(scanner_base_db.FAKE_VIOLATIONS),
                         len(actual_data))
for violation in actual_data:
self.assertEqual(scanner_index_id, violation.scanner_index_id)
def test_list_with_scnr_index_multi_all_success_scan(self):
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id1)
self.populate_db(inv_index_id=self.inv_index_id3)
actual_data = self.violation_access.list(
scanner_index_id=scanner_index_id)
        self.assertEqual(len(scanner_base_db.FAKE_VIOLATIONS),
                         len(actual_data))
for violation in actual_data:
self.assertEqual(scanner_index_id, violation.scanner_index_id)
def test_list_with_scnr_index_multi_all_failed_scan(self):
scanner_index_id = self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
self.populate_db(
inv_index_id=self.inv_index_id2, succeeded=[], failed=['IapScanner']
)
actual_data = self.violation_access.list(
scanner_index_id=scanner_index_id)
self.assertEqual(0, len(actual_data))
def test_list_with_both_indices(self):
scanner_index_id = self.populate_db(
inv_index_id=self.inv_index_id1, succeeded=[], failed=['IapScanner']
)
self.populate_db(
inv_index_id=self.inv_index_id2, succeeded=[], failed=['IapScanner']
)
with self.assertRaises(ValueError):
self.violation_access.list(
inv_index_id='blah', scanner_index_id=scanner_index_id)
if __name__ == '__main__':
unittest.main()
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import configparser
from dataclasses import dataclass
from enum import Enum
from io import StringIO
from pathlib import PurePath
from typing import Any, MutableMapping, cast
import toml
from pants.backend.python.goals import lockfile
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.target_types import ConsoleScript
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.goals.test import (
ConsoleCoverageReport,
CoverageData,
CoverageDataCollection,
CoverageReport,
CoverageReports,
FilesystemCoverageReport,
)
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestContents,
FileContent,
MergeDigests,
PathGlobs,
Snapshot,
)
from pants.engine.process import FallibleProcessResult, ProcessExecutionFailure, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.option.global_options import ProcessCleanupOption
from pants.option.option_types import (
BoolOption,
EnumListOption,
FileOption,
FloatOption,
StrListOption,
StrOption,
)
from pants.source.source_root import AllSourceRoots
from pants.util.docutil import git_url
from pants.util.logging import LogLevel
"""
An overview:
Step 1: Run each test with the appropriate `--cov` arguments.
In `pytest_runner.py`, we pass options so that the pytest-cov plugin runs and records which
lines were encountered in the test. For each test, it will save a `.coverage` file (SQLite DB
format).
Step 2: Merge the results with `coverage combine`.
We now have a bunch of individual `PytestCoverageData` values, each with their own `.coverage` file.
We run `coverage combine` to convert this into a single `.coverage` file.
Step 3: Generate the report with `coverage {html,xml,console}`.
All the file names in the single merged `.coverage` file are still stripped of their source roots,
and we want to generate a report with the source roots restored. Coverage requires that the files
it's reporting on be present when it generates the report, so we populate all the source files.
Step 4: `test.py` outputs the final report.
"""
class CoverageReportType(Enum):
CONSOLE = ("console", "report")
XML = ("xml", None)
HTML = ("html", None)
RAW = ("raw", None)
JSON = ("json", None)
_report_name: str
def __new__(cls, value: str, report_name: str | None = None) -> CoverageReportType:
member: CoverageReportType = object.__new__(cls)
member._value_ = value
member._report_name = report_name if report_name is not None else value
return member
@property
def report_name(self) -> str:
return self._report_name
@property
def value(self) -> str:
return cast(str, super().value)
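# For example, CoverageReportType.CONSOLE.value is "console" while its
# report_name is "report" (matching the `coverage report` subcommand); the
# other members fall back to using their value as the report name.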
class CoverageSubsystem(PythonToolBase):
options_scope = "coverage-py"
help = "Configuration for Python test coverage measurement."
default_version = "coverage[toml]>=5.5,<5.6"
default_main = ConsoleScript("coverage")
register_interpreter_constraints = True
default_interpreter_constraints = ["CPython>=3.6,<4"]
register_lockfile = True
default_lockfile_resource = ("pants.backend.python.subsystems", "coverage_py_lockfile.txt")
default_lockfile_path = "src/python/pants/backend/python/subsystems/coverage_py_lockfile.txt"
default_lockfile_url = git_url(default_lockfile_path)
filter = StrListOption(
"--filter",
help=(
"A list of Python modules or filesystem paths to use in the coverage report, e.g. "
"`['helloworld_test', 'helloworld/util/dirutil'].\n\nBoth modules and directory "
"paths are recursive: any submodules or child paths, respectively, will be "
"included.\n\nIf you leave this off, the coverage report will include every file "
"in the transitive closure of the address/file arguments; for example, `test ::` "
"will include every Python file in your project, whereas "
"`test project/app_test.py` will include `app_test.py` and any of its transitive "
"dependencies."
),
)
reports = EnumListOption(
"--report",
default=[CoverageReportType.CONSOLE],
help="Which coverage report type(s) to emit.",
)
_output_dir = StrOption(
"--output-dir",
default=str(PurePath("dist", "coverage", "python")),
advanced=True,
help="Path to write the Pytest Coverage report to. Must be relative to build root.",
)
config = FileOption(
"--config",
default=None,
advanced=True,
help=lambda cls: (
"Path to an INI or TOML config file understood by coverage.py "
"(https://coverage.readthedocs.io/en/stable/config.html).\n\n"
f"Setting this option will disable `[{cls.options_scope}].config_discovery`. Use "
f"this option if the config is located in a non-standard location."
),
)
config_discovery = BoolOption(
"--config-discovery",
default=True,
advanced=True,
help=lambda cls: (
"If true, Pants will include any relevant config files during runs "
"(`.coveragerc`, `setup.cfg`, `tox.ini`, and `pyproject.toml`)."
f"\n\nUse `[{cls.options_scope}].config` instead if your config is in a "
f"non-standard location."
),
)
global_report = BoolOption(
"--global-report",
default=False,
help=(
"If true, Pants will generate a global coverage report.\n\nThe global report will "
"include all Python source files in the workspace and not just those depended on "
"by the tests that were run."
),
)
fail_under = FloatOption(
"--fail-under",
default=None,
help=(
"Fail if the total combined coverage percentage for all tests is less than this "
"number.\n\nUse this instead of setting fail_under in a coverage.py config file, "
"as the config will apply to each test separately, while you typically want this "
"to apply to the combined coverage for all tests run."
"\n\nNote that you must generate at least one (non-raw) coverage report for this "
"check to trigger.\n\nNote also that if you specify a non-integral value, you must "
"also set [report] precision properly in the coverage.py config file to make use "
"of the decimal places. See https://coverage.readthedocs.io/en/latest/config.html ."
),
)
@property
def output_dir(self) -> PurePath:
return PurePath(self._output_dir)
@property
def config_request(self) -> ConfigFilesRequest:
# Refer to https://coverage.readthedocs.io/en/stable/config.html.
return ConfigFilesRequest(
specified=self.config,
specified_option_name=f"[{self.options_scope}].config",
discovery=self.config_discovery,
check_existence=[".coveragerc"],
check_content={
"setup.cfg": b"[coverage:",
"tox.ini": b"[coverage:]",
"pyproject.toml": b"[tool.coverage",
},
)
class CoveragePyLockfileSentinel(GenerateToolLockfileSentinel):
resolve_name = CoverageSubsystem.options_scope
@rule
def setup_coverage_lockfile(
_: CoveragePyLockfileSentinel, coverage: CoverageSubsystem
) -> GeneratePythonLockfile:
return GeneratePythonLockfile.from_tool(coverage)
@dataclass(frozen=True)
class PytestCoverageData(CoverageData):
address: Address
digest: Digest
class PytestCoverageDataCollection(CoverageDataCollection):
element_type = PytestCoverageData
@dataclass(frozen=True)
class CoverageConfig:
digest: Digest
path: str
def _validate_and_update_config(
coverage_config: configparser.ConfigParser, config_path: str | None
) -> None:
if not coverage_config.has_section("run"):
coverage_config.add_section("run")
run_section = coverage_config["run"]
relative_files_str = run_section.get("relative_files", "True")
if relative_files_str.lower() != "true":
raise ValueError(
"relative_files under the 'run' section must be set to True in the config "
f"file {config_path}"
)
coverage_config.set("run", "relative_files", "True")
omit_elements = list(run_section.get("omit", "").split("\n")) or ["\n"]
if "pytest.pex/*" not in omit_elements:
omit_elements.append("pytest.pex/*")
run_section["omit"] = "\n".join(omit_elements)
class InvalidCoverageConfigError(Exception):
pass
def _parse_toml_config(fc: FileContent) -> MutableMapping[str, Any]:
try:
return toml.loads(fc.content.decode())
except toml.TomlDecodeError as exc:
raise InvalidCoverageConfigError(
f"Failed to parse the coverage.py config `{fc.path}` as TOML. Please either fix "
f"the config or update `[coverage-py].config` and/or "
f"`[coverage-py].config_discovery`.\n\nParse error: {repr(exc)}"
)
def _parse_ini_config(fc: FileContent) -> configparser.ConfigParser:
cp = configparser.ConfigParser()
try:
cp.read_string(fc.content.decode())
return cp
except configparser.Error as exc:
raise InvalidCoverageConfigError(
f"Failed to parse the coverage.py config `{fc.path}` as INI. Please either fix "
f"the config or update `[coverage-py].config` and/or `[coverage-py].config_discovery`."
f"\n\nParse error: {repr(exc)}"
)
def _update_config(fc: FileContent) -> FileContent:
if PurePath(fc.path).suffix == ".toml":
all_config = _parse_toml_config(fc)
tool = all_config.setdefault("tool", {})
coverage = tool.setdefault("coverage", {})
run = coverage.setdefault("run", {})
run["relative_files"] = True
if "pytest.pex/*" not in run.get("omit", []):
run["omit"] = [*run.get("omit", []), "pytest.pex/*"]
return FileContent(fc.path, toml.dumps(all_config).encode())
cp = _parse_ini_config(fc)
run_section = "coverage:run" if fc.path in ("tox.ini", "setup.cfg") else "run"
if not cp.has_section(run_section):
cp.add_section(run_section)
cp.set(run_section, "relative_files", "True")
omit_elements = cp[run_section].get("omit", "").split("\n") or ["\n"]
if "pytest.pex/*" not in omit_elements:
omit_elements.append("pytest.pex/*")
cp.set(run_section, "omit", "\n".join(omit_elements))
stream = StringIO()
cp.write(stream)
return FileContent(fc.path, stream.getvalue().encode())
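# A small usage sketch for _update_config (illustrative only, not part of the
# Pants rules): a minimal TOML config shows the two edits it makes, forcing
# `relative_files` and omitting the synthetic pytest pex.
def _example_update_config():
    original = FileContent(
        "pyproject.toml", b"[tool.coverage.run]\nomit = ['foo.py']\n"
    )
    updated = toml.loads(_update_config(original).content.decode())
    run = updated["tool"]["coverage"]["run"]
    assert run["relative_files"] is True
    assert run["omit"] == ["foo.py", "pytest.pex/*"]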
def get_branch_value_from_config(fc: FileContent) -> bool:
# Note that coverage's default value for the branch setting is False, which we mirror here.
if PurePath(fc.path).suffix == ".toml":
all_config = _parse_toml_config(fc)
return bool(
all_config.get("tool", {}).get("coverage", {}).get("run", {}).get("branch", False)
)
cp = _parse_ini_config(fc)
run_section = "coverage:run" if fc.path in ("tox.ini", "setup.cfg") else "run"
if not cp.has_section(run_section):
return False
return cp.getboolean(run_section, "branch", fallback=False)
@rule
async def create_or_update_coverage_config(coverage: CoverageSubsystem) -> CoverageConfig:
config_files = await Get(ConfigFiles, ConfigFilesRequest, coverage.config_request)
if config_files.snapshot.files:
digest_contents = await Get(DigestContents, Digest, config_files.snapshot.digest)
file_content = _update_config(digest_contents[0])
else:
cp = configparser.ConfigParser()
cp.add_section("run")
cp.set("run", "relative_files", "True")
cp.set("run", "omit", "\npytest.pex/*")
stream = StringIO()
cp.write(stream)
file_content = FileContent(".coveragerc", stream.getvalue().encode())
digest = await Get(Digest, CreateDigest([file_content]))
return CoverageConfig(digest, file_content.path)
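# When no config file is discovered, the generated .coveragerc above renders
# (modulo configparser's exact whitespace) roughly as:
#
#     [run]
#     relative_files = True
#     omit =
#         pytest.pex/*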
@dataclass(frozen=True)
class CoverageSetup:
pex: VenvPex
@rule
async def setup_coverage(coverage: CoverageSubsystem) -> CoverageSetup:
pex = await Get(VenvPex, PexRequest, coverage.to_pex_request())
return CoverageSetup(pex)
@dataclass(frozen=True)
class MergedCoverageData:
coverage_data: Digest
addresses: tuple[Address, ...]
@rule(desc="Merge Pytest coverage data", level=LogLevel.DEBUG)
async def merge_coverage_data(
data_collection: PytestCoverageDataCollection,
coverage_setup: CoverageSetup,
coverage_config: CoverageConfig,
coverage: CoverageSubsystem,
source_roots: AllSourceRoots,
) -> MergedCoverageData:
if len(data_collection) == 1 and not coverage.global_report:
coverage_data = data_collection[0]
return MergedCoverageData(coverage_data.digest, (coverage_data.address,))
coverage_digest_gets = []
coverage_data_file_paths = []
addresses = []
for data in data_collection:
# We prefix each .coverage file with its corresponding address to avoid collisions.
coverage_digest_gets.append(
Get(Digest, AddPrefix(data.digest, prefix=data.address.path_safe_spec))
)
coverage_data_file_paths.append(f"{data.address.path_safe_spec}/.coverage")
addresses.append(data.address)
if coverage.global_report:
# It's important to set the `branch` value in the empty base report to the value it will
# have when running on real inputs, so that the reports are of the same type, and can be
# merged successfully. Otherwise we may get "Can't combine arc data with line data" errors.
# See https://github.com/pantsbuild/pants/issues/14542 .
config_contents = await Get(DigestContents, Digest, coverage_config.digest)
branch = get_branch_value_from_config(config_contents[0]) if config_contents else False
global_coverage_base_dir = PurePath("__global_coverage__")
global_coverage_config_path = global_coverage_base_dir / "pyproject.toml"
global_coverage_config_content = toml.dumps(
{
"tool": {
"coverage": {
"run": {
"relative_files": True,
"source": [source_root.path for source_root in source_roots],
"branch": branch,
}
}
}
}
).encode()
no_op_exe_py_path = global_coverage_base_dir / "no-op-exe.py"
all_sources_digest, no_op_exe_py_digest, global_coverage_config_digest = await MultiGet(
Get(
Digest,
PathGlobs(globs=[f"{source_root.path}/**/*.py" for source_root in source_roots]),
),
Get(Digest, CreateDigest([FileContent(path=str(no_op_exe_py_path), content=b"")])),
Get(
Digest,
CreateDigest(
[
FileContent(
path=str(global_coverage_config_path),
content=global_coverage_config_content,
),
]
),
),
)
extra_sources_digest = await Get(
Digest, MergeDigests((all_sources_digest, no_op_exe_py_digest))
)
input_digest = await Get(
Digest, MergeDigests((extra_sources_digest, global_coverage_config_digest))
)
result = await Get(
ProcessResult,
VenvPexProcess(
coverage_setup.pex,
argv=("run", "--rcfile", str(global_coverage_config_path), str(no_op_exe_py_path)),
input_digest=input_digest,
output_files=(".coverage",),
description="Create base global Pytest coverage report.",
level=LogLevel.DEBUG,
),
)
coverage_digest_gets.append(
Get(
Digest, AddPrefix(digest=result.output_digest, prefix=str(global_coverage_base_dir))
)
)
coverage_data_file_paths.append(str(global_coverage_base_dir / ".coverage"))
else:
extra_sources_digest = EMPTY_DIGEST
input_digest = await Get(Digest, MergeDigests(await MultiGet(coverage_digest_gets)))
result = await Get(
ProcessResult,
VenvPexProcess(
coverage_setup.pex,
# We tell combine to keep the original input files, to aid debugging in the sandbox.
argv=("combine", "--keep", *sorted(coverage_data_file_paths)),
input_digest=input_digest,
output_files=(".coverage",),
description=f"Merge {len(coverage_data_file_paths)} Pytest coverage reports.",
level=LogLevel.DEBUG,
),
)
return MergedCoverageData(
await Get(Digest, MergeDigests((result.output_digest, extra_sources_digest))),
tuple(addresses),
)
@rule(desc="Generate Pytest coverage reports", level=LogLevel.DEBUG)
async def generate_coverage_reports(
merged_coverage_data: MergedCoverageData,
coverage_setup: CoverageSetup,
coverage_config: CoverageConfig,
coverage_subsystem: CoverageSubsystem,
process_cleanup: ProcessCleanupOption,
) -> CoverageReports:
"""Takes all Python test results and generates a single coverage report."""
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(merged_coverage_data.addresses)
)
sources = await Get(
PythonSourceFiles,
# Coverage sometimes includes non-Python files in its `.coverage` data. We need to
# ensure that they're present when generating the report. We include all the files included
# by `pytest_runner.py`.
PythonSourceFilesRequest(
transitive_targets.closure, include_files=True, include_resources=True
),
)
input_digest = await Get(
Digest,
MergeDigests(
(
merged_coverage_data.coverage_data,
coverage_config.digest,
sources.source_files.snapshot.digest,
)
),
)
pex_processes = []
report_types = []
result_snapshot = await Get(Snapshot, Digest, merged_coverage_data.coverage_data)
coverage_reports: list[CoverageReport] = []
for report_type in coverage_subsystem.reports:
if report_type == CoverageReportType.RAW:
coverage_reports.append(
FilesystemCoverageReport(
# We don't know yet if the coverage is sufficient, so we let some other report
# trigger the failure if necessary.
coverage_insufficient=False,
report_type=CoverageReportType.RAW.value,
result_snapshot=result_snapshot,
directory_to_materialize_to=coverage_subsystem.output_dir,
report_file=coverage_subsystem.output_dir / ".coverage",
)
)
continue
report_types.append(report_type)
output_file = (
f"coverage.{report_type.value}"
if report_type in {CoverageReportType.XML, CoverageReportType.JSON}
else None
)
args = [report_type.report_name, f"--rcfile={coverage_config.path}"]
if coverage_subsystem.fail_under is not None:
args.append(f"--fail-under={coverage_subsystem.fail_under}")
pex_processes.append(
VenvPexProcess(
coverage_setup.pex,
argv=tuple(args),
input_digest=input_digest,
output_directories=("htmlcov",) if report_type == CoverageReportType.HTML else None,
output_files=(output_file,) if output_file else None,
description=f"Generate Pytest {report_type.report_name} coverage report.",
level=LogLevel.DEBUG,
)
)
results = await MultiGet(
Get(FallibleProcessResult, VenvPexProcess, process) for process in pex_processes
)
for proc, res in zip(pex_processes, results):
if res.exit_code not in {0, 2}:
# coverage.py uses exit code 2 if --fail-under triggers, in which case the
# reports are still generated.
raise ProcessExecutionFailure(
res.exit_code,
res.stdout,
res.stderr,
proc.description,
process_cleanup=process_cleanup.val,
)
# In practice if one result triggers --fail-under, they all will, but no need to rely on that.
result_exit_codes = tuple(res.exit_code for res in results)
result_stdouts = tuple(res.stdout for res in results)
result_snapshots = await MultiGet(Get(Snapshot, Digest, res.output_digest) for res in results)
coverage_reports.extend(
_get_coverage_report(
coverage_subsystem.output_dir, report_type, exit_code != 0, stdout, snapshot
)
for (report_type, exit_code, stdout, snapshot) in zip(
report_types, result_exit_codes, result_stdouts, result_snapshots
)
)
return CoverageReports(tuple(coverage_reports))
def _get_coverage_report(
output_dir: PurePath,
report_type: CoverageReportType,
coverage_insufficient: bool,
result_stdout: bytes,
result_snapshot: Snapshot,
) -> CoverageReport:
if report_type == CoverageReportType.CONSOLE:
return ConsoleCoverageReport(coverage_insufficient, result_stdout.decode())
report_file: PurePath | None
if report_type == CoverageReportType.HTML:
report_file = output_dir / "htmlcov" / "index.html"
elif report_type == CoverageReportType.XML:
report_file = output_dir / "coverage.xml"
elif report_type == CoverageReportType.JSON:
report_file = output_dir / "coverage.json"
else:
raise ValueError(f"Invalid coverage report type: {report_type}")
return FilesystemCoverageReport(
coverage_insufficient=coverage_insufficient,
report_type=report_type.value,
result_snapshot=result_snapshot,
directory_to_materialize_to=output_dir,
report_file=report_file,
)
def rules():
return [
*collect_rules(),
*lockfile.rules(),
UnionRule(CoverageDataCollection, PytestCoverageDataCollection),
UnionRule(GenerateToolLockfileSentinel, CoveragePyLockfileSentinel),
]
import unittest
from robot.variables import VariableSplitter, VariableIterator
from robot.utils.asserts import assert_equal, assert_false, assert_true
class TestVariableSplitter(unittest.TestCase):
_identifiers = ['$', '@', '%', '&', '*']
def test_empty(self):
self._test('')
self._test(' ')
def test_no_vars(self):
for inp in ['hello world', '$hello', '{hello}', '$\\{hello}',
                    '${hello', '$hello}', 'a bit longer string here']:
self._test(inp)
def test_not_string(self):
self._test(42)
self._test([1, 2, 3])
def test_backslashes(self):
for inp in ['\\', '\\\\', '\\\\\\\\\\', '\\hello\\\\world\\\\\\']:
self._test(inp)
def test_one_var(self):
self._test('${hello}', '${hello}', 0)
self._test('1 @{hello} more', '@{hello}', 2)
self._test('*{hi}}', '*{hi}', 0)
self._test('{%{{hi}}', '%{{hi}', 1)
self._test('-= ${} =-', '${}', 3)
        # In this case the splitter thinks there are internal variables even
        # though there aren't. A better check would probably spend more time
        # than is saved when the variable base is processed again in this
        # special case.
self._test('%{hi%{u}', '%{hi%{u}', 0, internal=True)
def test_escape_internal_closing_curly(self):
        self._test(r'${embed:\d{2\}}', r'${embed:\d{2\}}')
        self._test(r'{}{${e:\d\{4\}-\d{2\}-\d{2\}}}}',
                   r'${e:\d\{4\}-\d{2\}-\d{2\}}', start=3)
self._test(r'$&{\{\}{\}\\}{}', r'&{\{\}{\}\\}', start=1)
self._test(r'${&{\}{\\\\}\\}}{}', r'${&{\}{\\\\}\\}', internal=True)
def test_no_unescaped_internal_closing_curly(self):
self._test(r'${x\}')
self._test(r'${x\\\}')
self._test(r'${x\\\\\\\}')
def test_uneven_curlys(self):
self._test('${x:{}', '${x:{}')
self._test('${x:{{}}', '${x:{{}')
self._test('xx${x:{}xx', '${x:{}', start=2)
self._test('{%{{}{{}}{{', '%{{}', start=1)
def test_multiple_vars(self):
self._test('${hello} ${world}', '${hello}', 0)
self._test('hi %{u}2 and @{u2} and also *{us3}', '%{u}', 3)
self._test('0123456789 %{1} and @{2', '%{1}', 11)
def test_escaped_var(self):
self._test('\\${hello}')
self._test('hi \\\\\\${hello} moi')
def test_not_escaped_var(self):
self._test('\\\\${hello}', '${hello}', 2)
self._test('\\hi \\\\\\\\\\\\${hello} moi', '${hello}',
len('\\hi \\\\\\\\\\\\'))
self._test('\\ ${hello}', '${hello}', 2)
self._test('${hello}\\', '${hello}', 0)
self._test('\\ \\ ${hel\\lo}\\', '${hel\\lo}', 4)
def test_escaped_and_not_escaped_vars(self):
for inp, var, start in [
('\\${esc} ${not}', '${not}', len('\\${esc} ')),
('\\\\\\${esc} \\\\${not}', '${not}',
len('\\\\\\${esc} \\\\')),
('\\${esc}\\\\${not}${n2}', '${not}', len('\\${esc}\\\\'))]:
self._test(inp, var, start)
def test_internal_vars(self):
for inp, var, start in [
('${hello${hi}}', '${hello${hi}}', 0),
('bef ${${hi}hello} aft', '${${hi}hello}', 4),
('\\${not} ${hel${hi}lo} ', '${hel${hi}lo}', len('\\${not} ')),
('${${hi}${hi}}\\', '${${hi}${hi}}', 0),
('${${hi${hi}}} ${xx}', '${${hi${hi}}}', 0),
('${xx} ${${hi${hi}}}', '${xx}', 0),
('${\\${hi${hi}}}', '${\\${hi${hi}}}', 0),
('\\${${hi${hi}}}', '${hi${hi}}', len('\\${')),
('\\${\\${hi\\\\${hi}}}', '${hi}', len('\\${\\${hi\\\\'))]:
self._test(inp, var, start, internal=var.count('{') > 1)
def test_list_index(self):
self._test('@{x}[0]', '@{x}', index='0')
self._test('.@{x}[42]..', '@{x}', start=1, index='42')
self._test('@{x}[]', '@{x}', index='')
self._test('@{x}[inv]', '@{x}', index='inv')
self._test('@{x}[0', '@{x}')
self._test('@{x}}[0]', '@{x}')
def test_list_index_with_internal_vars(self):
self._test('@{x}[${i}]', '@{x}', index='${i}')
self._test('xx @{x}[${i}] ${xyz}', '@{x}', start=3, index='${i}')
self._test('@@@@@{X{X}[${${i}-${${${i}}}}]', '@{X{X}', start=4,
index='${${i}-${${${i}}}}')
self._test('@{${i}}[${j{}]', '@{${i}}', index='${j{}', internal=True)
def test_dict_index(self):
self._test('&{x}[key]', '&{x}', index='key')
self._test('.&{x}[42]..', '&{x}', start=1, index='42')
self._test('&{x}[]', '&{x}', index='')
self._test('&{x}[k', '&{x}')
self._test('&{x}}[0]', '&{x}')
def test_dict_index_with_internal_vars(self):
self._test('&{x}[${i}]', '&{x}', index='${i}')
self._test('xx &{x}[${i}] ${xyz}', '&{x}', start=3, index='${i}')
self._test('&&&&&{X{X}[${${i}-${${${i}}}}]', '&{X{X}', start=4,
index='${${i}-${${${i}}}}')
self._test('&{${i}}[${j{}]', '&{${i}}', index='${j{}', internal=True)
def test_no_index_with_others_vars(self):
self._test('${x}[0]', '${x}')
self._test('%{x}[0]', '%{x}')
self._test('*{x}[0]', '*{x}')
def test_custom_identifiers(self):
for inp, start in [('@{x}${y}', 4),
('%{x} ${y}', 5),
('*{x}567890${y}', 10),
('&{x}%{x}@{x}\\${x}${y}',
len('&{x}%{x}@{x}\\${x}'))]:
self._test(inp, '${y}', start, identifiers=['$'])
def test_identifier_as_variable_name(self):
for i in self._identifiers:
for count in 1, 2, 3, 42:
var = '%s{%s}' % (i, i*count)
self._test(var, var)
self._test(var+'spam', var)
self._test('eggs'+var+'spam', var, start=4)
self._test(i+var+i, var, start=1)
def test_identifier_as_variable_name_with_internal_vars(self):
for i in self._identifiers:
for count in 1, 2, 3, 42:
var = '%s{%s{%s}}' % (i, i*count, i)
self._test(var, var, internal=True)
self._test('eggs'+var+'spam', var, start=4, internal=True)
var = '%s{%s{%s}}' % (i, i*count, i*count)
self._test(var, var, internal=True)
self._test('eggs'+var+'spam', var, start=4, internal=True)
def test_many_possible_starts_and_ends(self):
self._test('{}'*10000)
self._test('{{}}'*1000 + '${var}', '${var}', start=4000)
def _test(self, inp, variable=None, start=0, index=None,
identifiers=_identifiers, internal=False):
if variable is None:
identifier = base = None
start = end = -1
is_var = is_list_var = is_dict_var = False
else:
identifier = variable[0]
base = variable[2:-1]
end = start + len(variable)
is_var = inp == variable
is_list_var = is_var and inp[0] == '@'
is_dict_var = is_var and inp[0] == '&'
if index is not None:
end += len(index) + 2
is_var = inp == '%s[%s]' % (variable, index)
res = VariableSplitter(inp, identifiers)
assert_equal(res.base, base, "'%s' base" % inp)
assert_equal(res.start, start, "'%s' start" % inp)
assert_equal(res.end, end, "'%s' end" % inp)
assert_equal(res.identifier, identifier, "'%s' identifier" % inp)
assert_equal(res.index, index, "'%s' index" % inp)
assert_equal(res._may_have_internal_variables, internal,
"'%s' internal" % inp)
assert_equal(res.is_variable(), is_var)
assert_equal(res.is_list_variable(), is_list_var)
assert_equal(res.is_dict_variable(), is_dict_var)
def test_is_variable(self):
for no in ['', 'xxx', '${var} not alone', '\\${notvat}', '\\\\${var}',
'${var}xx}', '${x}${y}']:
assert_false(VariableSplitter(no).is_variable())
for yes in ['${var}', '${var${}', '${var${internal}}', '@{var}',
'@{var}[0]']:
assert_true(VariableSplitter(yes).is_variable())
def test_is_list_variable(self):
for no in ['', 'xxx', '${var} not alone', '\\${notvat}', '\\\\${var}',
'${var}xx}', '${x}${y}', '@{list}[0]']:
assert_false(VariableSplitter(no).is_list_variable())
assert_true(VariableSplitter('@{list}').is_list_variable())
class TestVariableIterator(unittest.TestCase):
def test_no_variables(self):
iterator = VariableIterator('no vars here', identifiers='$')
assert_equal(list(iterator), [])
assert_equal(bool(iterator), False)
assert_equal(len(iterator), 0)
def test_one_variable(self):
iterator = VariableIterator('one ${var} here', identifiers='$')
assert_equal(list(iterator), [('one ', '${var}', ' here')])
assert_equal(bool(iterator), True)
assert_equal(len(iterator), 1)
def test_multiple_variables(self):
iterator = VariableIterator('${1} @{2} and %{3}', identifiers='$@%')
assert_equal(list(iterator), [('', '${1}', ' @{2} and %{3}'),
(' ', '@{2}', ' and %{3}'),
(' and ', '%{3}', '')])
assert_equal(bool(iterator), True)
assert_equal(len(iterator), 3)
def test_can_be_iterated_many_times(self):
iterator = VariableIterator('one ${var} here', identifiers='$')
assert_equal(list(iterator), [('one ', '${var}', ' here')])
assert_equal(list(iterator), [('one ', '${var}', ' here')])
assert_equal(bool(iterator), True)
assert_equal(bool(iterator), True)
assert_equal(len(iterator), 1)
assert_equal(len(iterator), 1)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2016 Jamie Bull
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""Run functions for EnergyPlus.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import platform
import pydoc
import shutil
from subprocess import CalledProcessError, check_call
import sys
import tempfile
try:
import multiprocessing as mp
except ImportError:
pass
def install_paths(version=None, iddname=None):
"""Get the install paths for EnergyPlus executable and weather files.
We prefer to get the install path from the IDD name but fall back to
getting it from the version number for backwards compatibility and to
simplify tests.
Parameters
----------
version : str, optional
EnergyPlus version in the format "X-X-X", e.g. "8-7-0".
iddname : str, optional
File path to the IDD.
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_weather : str
Full path to the EnergyPlus weather directory.
"""
try:
eplus_exe, eplus_home = paths_from_iddname(iddname)
except (AttributeError, TypeError, ValueError):
eplus_exe, eplus_home = paths_from_version(version)
eplus_weather = os.path.join(eplus_home, 'WeatherData')
return eplus_exe, eplus_weather
def paths_from_iddname(iddname):
"""Get the EnergyPlus install directory and executable path.
Parameters
----------
iddname : str, optional
File path to the IDD.
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_home : str
Full path to the EnergyPlus install directory.
Raises
------
AttributeError (TypeError on Windows)
If iddname does not have a directory component (e.g. if None).
ValueError
If eplus_exe is not a file.
"""
eplus_home = os.path.abspath(os.path.dirname(iddname))
    if platform.system() == 'Windows':
        eplus_exe = os.path.join(eplus_home, 'energyplus.exe')
    else:
        # Linux and macOS use the same executable name.
        eplus_exe = os.path.join(eplus_home, 'energyplus')
if not os.path.isfile(eplus_exe):
raise ValueError
return eplus_exe, eplus_home
def paths_from_version(version):
"""Get the EnergyPlus install directory and executable path.
Parameters
----------
version : str, optional
EnergyPlus version in the format "X-X-X", e.g. "8-7-0".
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_home : str
Full path to the EnergyPlus install directory.
"""
if platform.system() == 'Windows':
eplus_home = "C:/EnergyPlusV{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus.exe')
elif platform.system() == "Linux":
eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus')
else:
eplus_home = "/Applications/EnergyPlus-{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus')
return eplus_exe, eplus_home
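# For example, paths_from_version('8-9-0') resolves to
# C:/EnergyPlusV8-9-0/energyplus.exe on Windows,
# /usr/local/EnergyPlus-8-9-0/energyplus on Linux, and
# /Applications/EnergyPlus-8-9-0/energyplus elsewhere (macOS).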
def wrapped_help_text(wrapped_func):
"""Decorator to pass through the documentation from a wrapped function.
"""
def decorator(wrapper_func):
"""The decorator.
Parameters
----------
        wrapper_func : callable
            The function that wraps `wrapped_func`.
"""
wrapper_func.__doc__ = ('This method wraps the following method:\n\n' +
pydoc.text.document(wrapped_func))
return wrapper_func
return decorator
def runIDFs(jobs, processors=1):
"""Wrapper for run() to be used when running IDF5 runs in parallel.
Parameters
----------
jobs : iterable
        A list or generator made up of (IDF object, kwargs dict) pairs
(see `run_functions.run` for valid keywords).
processors : int, optional
Number of processors to run on (default: 1). If 0 is passed then
the process will run on all CPUs, -1 means one less than all CPUs, etc.
"""
    if processors <= 0:
        # 0 means all CPUs, -1 one less than all CPUs, etc.
        processors = max(1, mp.cpu_count() + processors)
shutil.rmtree("multi_runs", ignore_errors=True)
os.mkdir("multi_runs")
prepared_runs = (prepare_run(run_id, run_data) for run_id, run_data in enumerate(jobs))
try:
pool = mp.Pool(processors)
pool.map(multirunner, prepared_runs)
pool.close()
except NameError:
# multiprocessing not present so pass the jobs one at a time
for job in prepared_runs:
            multirunner(job)
shutil.rmtree("multi_runs", ignore_errors=True)
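# Usage sketch for runIDFs (hypothetical IDF objects; see run() below for the
# accepted keyword arguments):
#
#     jobs = ((idf, {'ep_version': '8-9-0'}) for idf in idfs)
#     runIDFs(jobs, processors=-1)  # all CPUs but one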
def prepare_run(run_id, run_data):
"""Prepare run inputs for one of multiple EnergyPlus runs.
    Parameters
    ----------
    run_id : int
        An ID number for naming the IDF.
    run_data : tuple
        The IDF and the keyword args to pass to the EnergyPlus executable.
    Returns
    -------
    tuple
        The IDF path and EPW as a pair, and the keyword args.
"""
idf, kwargs = run_data
epw = idf.epw
idf_dir = os.path.join('multi_runs', 'idf_%i' % run_id)
os.mkdir(idf_dir)
idf_path = os.path.join(idf_dir, 'in.idf')
idf.saveas(idf_path)
return (idf_path, epw), kwargs
def multirunner(args):
"""Wrapper for run() to be used when running IDF and EPW runs in parallel.
Parameters
----------
args : list
A list made up of a two-item list (IDF and EPW) and a kwargs dict.
"""
run(*args[0], **args[1])
def run(idf=None, weather=None, output_directory='', annual=False,
design_day=False, idd=None, epmacro=False, expandobjects=False,
readvars=False, output_prefix=None, output_suffix=None, version=False,
verbose='v', ep_version=None):
"""
Wrapper around the EnergyPlus command line interface.
Parameters
----------
idf : str
Full or relative path to the IDF file to be run, or an IDF object.
weather : str
Full or relative path to the weather file.
output_directory : str, optional
        Full or relative path to an output directory (default: 'run_outputs')
annual : bool, optional
If True then force annual simulation (default: False)
design_day : bool, optional
Force design-day-only simulation (default: False)
idd : str, optional
Input data dictionary (default: Energy+.idd in EnergyPlus directory)
    epmacro : bool, optional
Run EPMacro prior to simulation (default: False).
expandobjects : bool, optional
Run ExpandObjects prior to simulation (default: False)
readvars : bool, optional
Run ReadVarsESO after simulation (default: False)
output_prefix : str, optional
Prefix for output file names (default: eplus)
output_suffix : str, optional
Suffix style for output file names (default: L)
L: Legacy (e.g., eplustbl.csv)
C: Capital (e.g., eplusTable.csv)
D: Dash (e.g., eplus-table.csv)
version : bool, optional
Display version information (default: False)
    verbose : str, optional
Set verbosity of runtime messages (default: v)
v: verbose
q: quiet
    ep_version : str, optional
EnergyPlus version, used to find install directory. Required if run() is
called with an IDF file path rather than an IDF object.
Returns
-------
str : status
Raises
------
CalledProcessError
AttributeError
If no ep_version parameter is passed when calling with an IDF file path
rather than an IDF object.
"""
args = locals().copy()
# get unneeded params out of args ready to pass the rest to energyplus.exe
verbose = args.pop('verbose')
idf = args.pop('idf')
iddname = args.get('idd')
if not isinstance(iddname, str):
args.pop('idd')
try:
idf_path = os.path.abspath(idf.idfname)
except AttributeError:
idf_path = os.path.abspath(idf)
ep_version = args.pop('ep_version')
# get version from IDF object or by parsing the IDF file for it
if not ep_version:
try:
ep_version = '-'.join(str(x) for x in idf.idd_version[:3])
except AttributeError:
raise AttributeError(
"The ep_version must be set when passing an IDF path. \
Alternatively, use IDF.run()")
eplus_exe_path, eplus_weather_path = install_paths(ep_version, iddname)
if version:
# just get EnergyPlus version number and return
cmd = [eplus_exe_path, '--version']
check_call(cmd)
return
# convert paths to absolute paths if required
if os.path.isfile(args['weather']):
args['weather'] = os.path.abspath(args['weather'])
else:
args['weather'] = os.path.join(eplus_weather_path, args['weather'])
output_dir = os.path.abspath(args['output_directory'])
args['output_directory'] = output_dir
# store the directory we start in
cwd = os.getcwd()
run_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(run_dir)
# build a list of command line arguments
cmd = [eplus_exe_path]
for arg in args:
if args[arg]:
if isinstance(args[arg], bool):
args[arg] = ''
cmd.extend(['--{}'.format(arg.replace('_', '-'))])
if args[arg] != "":
cmd.extend([args[arg]])
cmd.extend([idf_path])
try:
if verbose == 'v':
print("\r\n" + " ".join(cmd) + "\r\n")
check_call(cmd)
elif verbose == 'q':
check_call(cmd, stdout=open(os.devnull, 'w'))
except CalledProcessError:
message = parse_error(output_dir)
raise EnergyPlusRunError(message)
finally:
os.chdir(cwd)
return 'OK'
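# Example invocation sketch (hypothetical file names; requires a local
# EnergyPlus install that install_paths() can discover via ep_version):
#
#     run(idf='in.idf', weather='USA_CO_Golden-NREL.724666_TMY3.epw',
#         output_directory='run_outputs', expandobjects=True,
#         readvars=True, ep_version='8-9-0')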
def parse_error(output_dir):
"""Add contents of stderr and eplusout.err and put it in the exception message.
:param output_dir: str
:return: str
"""
sys.stderr.seek(0)
std_err = sys.stderr.read().decode('utf-8')
err_file = os.path.join(output_dir, "eplusout.err")
if os.path.isfile(err_file):
with open(err_file, "r") as f:
ep_err = f.read()
else:
ep_err = "<File not found>"
message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals())
return message
class EnergyPlusRunError(Exception):
pass
from __future__ import absolute_import
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import GEOSGeometry, LineString
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import oracle, postgis, spatialite, no_oracle, no_spatialite
from django.test import TestCase
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
class DistanceTest(TestCase):
# A point we are testing distances with -- using a WGS84
    # coordinate that'll be implicitly transformed to the coordinate
    # system of the field, EPSG:32140 (Texas South Central w/units in
    # meters).
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test01_init(self):
"Test initialization of distance models."
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test02_dwithin(self):
"Testing the `dwithin` lookup type."
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple): dist1, dist2 = dist
else: dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle: type_error = True
else: type_error = False
if isinstance(dist, tuple):
if oracle: dist = dist[1]
else: dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
"Testing the `distance` GeoQuerySet method on projected coordinate systems."
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
        # Oracle 11 thinks this is not a projected coordinate system, so it's
        # not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
if oracle: tol = 2
else: tol = 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test03b_distance_method(self):
"Testing the `distance` GeoQuerySet method on geodetic coordnate systems."
if oracle: tol = 2
else: tol = 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString( ( (150.902, -34.4245), (150.87, -34.5789) ) )
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
            # PostGIS 1.4 and below is limited to distance queries only
# to/from point geometries, check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
        # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11));
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test03c_distance_method(self):
"Testing the `distance` GeoQuerySet method used with `transform` on a geographic field."
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
        # of 77005 to 100m) -- which isn't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test04_distance_lookups(self):
"Testing the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types."
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test05_geodetic_distance_lookups(self):
"Testing distance lookups on geodetic coordinate systems."
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
        # Ensured that a ValueError was raised; none of the rest of the test
        # is supported on this backend, so bail out now.
if spatialite: return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test06_area(self):
"Testing the `area` GeoQuerySet method."
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test07_length(self):
"Testing the `length` GeoQuerySet method."
# Reference query (should use `length_spheroid`).
        # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326), 'SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
if oracle: tol = 2
else: tol = 5
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test08_perimeter(self):
"Testing the `perimeter` GeoQuerySet method."
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
if oracle: tol = 2
else: tol = 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test09_measurement_null_fields(self):
"Testing the measurement GeoQuerySet methods on fields with NULL values."
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertEqual(None, z.distance)
self.assertEqual(None, z.area)
| |
"""Contains classes that represent a collection of addresses."""
import re
class InvalidAddressError(Exception):
"""The format of the given address was not recognized."""
pass
def _format_ip4(addr):
"""Return a string holding the human-friendly version of the IPv4
address *addr* in an integer representation.
"""
res = ""
res = res + str(addr >> 24) + "."
res = res + str((addr >> 16) & 0xff) + "."
res = res + str((addr >> 8) & 0xff) + "."
res = res + str(addr & 0xff)
return res
def _parse_ip4(addr):
"""Return the integer representation of the given address *addr* in a
human-friendly format (255.255.255.255).
"""
match = re.match(r"([0-9]{1,3})\." * 3 + r"([0-9]{1,3})", addr)
if match is None:
raise InvalidAddressError("Invalid IPv4 address: " + addr)
res = 0
for i in range(1, 5):
try:
if int(match.group(i)) > 255:
raise ValueError("Invalid IPv4 address: " + addr)
res = res * 256 + int(match.group(i))
except ValueError:
raise InvalidAddressError("Invalid IPv4 address: " + addr)
return res
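# Hedged sketch (illustration only, not part of the original module):
# _parse_ip4 and _format_ip4 are inverses of each other.
def _ip4_roundtrip_example():
    assert _parse_ip4("10.0.0.1") == 0x0A000001
    assert _format_ip4(0x0A000001) == "10.0.0.1"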
class Ip4Range:
"""Represents an IPv4 subnet."""
range_type = 'ip4'
dns_record = 'A'
def sortable(self, addr):
"""Returns a sortable representation of an address from this range."""
return _parse_ip4(addr.addr)
def __init__(self, iprange):
"""Initialize the range with the given subnet *iprange* with format
x.x.x.x/x."""
subnet_pos = iprange.find("/")
if subnet_pos < 0 or iprange.count(".") != 3:
raise InvalidAddressError("Invalid IPv4 range definition: "
+ iprange)
self.net = _parse_ip4(iprange[:subnet_pos])
self.mask = int(iprange[subnet_pos + 1:])
        if self.net < 0 or self.net >= 2**32 or self.mask < 0 or self.mask > 32:
            raise InvalidAddressError("Invalid IPv4 range definition: "
                                      + iprange)
        # get the real subnet address: clear the last (32 - mask) bits
        self.net = self.net & ((2 ** self.mask - 1) << (32 - self.mask))
def __iter__(self):
"""Iterator over the IPv4 subnet."""
for i in xrange(self.net, self.net + 2 ** (32 - self.mask)):
yield _format_ip4(i)
def __contains__(self, addr):
"""Return true if the given *addr* belongs to this subnet."""
try:
addr = _parse_ip4(addr)
except InvalidAddressError:
return False
return addr >= self.net and addr < self.net + 2 ** (32 - self.mask)
def len(self):
"""Return the number of addresses in this subnet."""
return 2 ** (32 - self.mask)
def __len__(self):
"""Return the number of addresses in this subnet."""
return self.len()
def __getitem__(self, ind):
"""Return the formatted addresses at index *n* from the subnet."""
# Allow the use of random.choice(Ip4Range)
if ind < 0 or ind >= 2 ** (32 - self.mask):
raise IndexError()
return _format_ip4(self.net + ind)
def __str__(self):
"""Return a human-readable representation of the subnet."""
return _format_ip4(self.net) + "/" + str(self.mask)
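# Hedged usage sketch (illustration only, not part of the original module):
# a /30 subnet holds four addresses, and membership, indexing and iteration
# all agree on them.
def _ip4range_example():
    rng = Ip4Range("192.168.0.0/30")
    assert len(rng) == 4
    assert rng[0] == "192.168.0.0"
    assert "192.168.0.3" in rng
    assert "192.168.0.4" not in rng
    assert list(rng)[-1] == "192.168.0.3"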
def _format_ip6(addr):
"""Return a string holding the human-friendly version of the IPv6
address *addr* in an integer representation.
"""
power = 112
res = ""
while power > 0:
# in format strings the arguments must be numbered before python2.7
res = res + "{0:04x}".format((addr >> power) & 0xffff) + ":"
power = power - 16
res = res + "{0:04x}".format(addr & 0xffff)
return res
def _parse_ip6(addr):
"""Return the integer representation of the given address *addr* in a
human-friendly format (19af:1234::abcd).
"""
if addr == "::":
addr = (":0" * 8)[1:] # strip the initial ':'
elif addr.find("::") >= 0:
abbrev = addr.find("::")
groups = addr.count(":")
if abbrev == 0 or abbrev == len(addr) - 2:
groups = groups - 1
add = (":0" * (8 - groups))[1:] # strip the initial ':'
        if abbrev == 0:  # expand the beginning
            addr = add + addr[abbrev + 1:]
        elif abbrev == len(addr) - 2:  # expand the end
            addr = addr[:abbrev + 1] + add
else:
addr = addr[:abbrev + 1] + add + addr[abbrev + 1:]
match = re.match("([a-fA-F0-9]{1,4}):" * 7 + "([a-fA-F0-9]{1,4})", addr)
if match is None:
raise InvalidAddressError("Invalid IPv6 address: " + addr)
res = 0
for i in range(1, 9):
try:
res = res * (2 ** 16) + int(match.group(i), 16)
except ValueError:
raise InvalidAddressError("Invalid IPv6 address: " + addr)
return res
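# Hedged sketch (illustration only, not part of the original module): the
# "::" abbreviation is expanded before parsing, and formatting always emits
# the full zero-padded form.
def _ip6_roundtrip_example():
    value = _parse_ip6("2001:db8::1")
    assert value == (0x20010db8 << 96) + 1
    assert _format_ip6(value) == "2001:0db8:0000:0000:0000:0000:0000:0001"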
class Ip6Range:
"""Represents an IPv6 subnet."""
range_type = "ip6"
dns_record = "AAAA"
def sortable(self, addr):
"""Returns a sortable representation of an address from this range."""
return _parse_ip6(addr.addr)
def __init__(self, iprange):
"""Initialize the range with the given subnet *iprange* with format
12ab:34cd::89ef/x.
"""
subnet_pos = iprange.find("/")
if subnet_pos < 0:
raise InvalidAddressError("Invalid IPv6 range: " + iprange)
self.net = _parse_ip6(iprange[:subnet_pos])
self.mask = int(iprange[subnet_pos + 1:])
if (self.net < 0 or self.net > 2**128 or
self.mask < 0 or self.mask > 128):
raise InvalidAddressError("Invalid IPv6 range: " + iprange)
        # get the real subnet address: clear the last (128 - mask) bits
        self.net = self.net & ((2 ** self.mask - 1) << (128 - self.mask))
def __iter__(self):
"""Iterator over the IPv6 subnet."""
# IPv6 addresses are too big for xrange()
i = self.net
while i < self.net + 2 ** (128 - self.mask):
yield _format_ip6(i)
i = i + 1
def __contains__(self, addr):
"""Return true if the given *addr* belongs to this subnet."""
try:
addr = _parse_ip6(addr)
except InvalidAddressError:
return False
return addr >= self.net and addr < self.net + 2 ** (128 - self.mask)
def len(self):
"""Return the number of addresses in this subnet."""
return 2 ** (128 - self.mask)
def __len__(self):
"""Return the number of addresses in this subnet."""
return self.len()
def __getitem__(self, ind):
"""Return the formatted addresses at index *n* from the subnet."""
# Allow the use of random.choice(Ip6Range)
if ind < 0 or ind >= 2 ** (128 - self.mask):
raise IndexError()
return _format_ip6(self.net + ind)
def __str__(self):
"""Return a human-readable representation of the subnet."""
return _format_ip6(self.net) + "/" + str(self.mask)
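# Hedged usage sketch (illustration only, not part of the original module):
# note that _format_ip6 always prints full, zero-padded groups, so the
# addresses yielded by the range are never abbreviated.
def _ip6range_example():
    rng = Ip6Range("2001:db8::/126")
    assert len(rng) == 4
    assert rng[1] == "2001:0db8:0000:0000:0000:0000:0000:0001"
    assert "2001:db8::3" in rng
    assert "2001:db8::4" not in rng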
class AddrSet:
"""Represent a generic set of addresses. The addresses must be added
manually.
"""
range_type = "set"
def sortable(self, addr):
"""Returns a sortable representation of an address from this range."""
return addr.addr
def __init__(self, addr_set=None, dns_record="A"):
"""Initialize a new address set of DNS record type *dns_record*."""
self.addr_set = addr_set
if self.addr_set is None:
self.addr_set = set()
self.dns_record = dns_record
def add(self, addr):
"""Add the address *addr* to the set."""
self.addr_set.add(addr)
def remove(self, addr):
"""Remove the address *addr* from the set."""
self.addr_set.remove(addr)
def __contains__(self, addr):
return addr in self.addr_set
def __iter__(self):
return iter(self.addr_set)
def len(self):
"""Return the number of elements in the set."""
return len(self.addr_set)
def __len__(self):
return self.len()
def __getitem__(self, ind):
if ind < 0 or ind >= len(self):
raise IndexError()
for addr in self:
if (ind == 0):
return addr
ind = ind - 1
def __str__(self):
res = ""
for elm in self.addr_set:
res = res + "," + str(elm)
# skip the first comma
return res[1:]
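# Hedged usage sketch (illustration only, not part of the original module):
# an AddrSet is an explicit collection, so membership is exact rather than
# derived from a subnet mask.
def _addrset_example():
    hosts = AddrSet(dns_record="AAAA")
    hosts.add("2001:db8::1")
    hosts.add("2001:db8::2")
    assert "2001:db8::1" in hosts
    assert "2001:db8::3" not in hosts
    assert len(hosts) == 2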
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sends email on behalf of application.
Provides functions for application developers to provide email services
for their applications. Also provides a few utility methods.
"""
import email
from email import MIMEBase
from email import MIMEMultipart
from email import MIMEText
from email import Parser
import email.header
import logging
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import mail_service_pb
from google.appengine.api import users
from google.appengine.api.mail_errors import *
from google.appengine.runtime import apiproxy_errors
ERROR_MAP = {
mail_service_pb.MailServiceError.BAD_REQUEST:
BadRequestError,
mail_service_pb.MailServiceError.UNAUTHORIZED_SENDER:
InvalidSenderError,
mail_service_pb.MailServiceError.INVALID_ATTACHMENT_TYPE:
InvalidAttachmentTypeError,
mail_service_pb.MailServiceError.INVALID_HEADER_NAME:
InvalidHeaderNameError,
}
EXTENSION_MIME_MAP = {
'aif': 'audio/x-aiff',
'aifc': 'audio/x-aiff',
'aiff': 'audio/x-aiff',
'asc': 'text/plain',
'au': 'audio/basic',
'avi': 'video/x-msvideo',
'bmp': 'image/x-ms-bmp',
'css': 'text/css',
'csv': 'text/csv',
'doc': 'application/msword',
'docx': 'application/msword',
'diff': 'text/plain',
'flac': 'audio/flac',
'gif': 'image/gif',
'gzip': 'application/x-gzip',
'htm': 'text/html',
'html': 'text/html',
'ics': 'text/calendar',
'jpe': 'image/jpeg',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'kml': 'application/vnd.google-earth.kml+xml',
'kmz': 'application/vnd.google-earth.kmz',
'm4a': 'audio/mp4',
'mid': 'audio/mid',
'mov': 'video/quicktime',
'mp3': 'audio/mpeg',
'mp4': 'video/mp4',
'mpe': 'video/mpeg',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
'odp': 'application/vnd.oasis.opendocument.presentation',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'odt': 'application/vnd.oasis.opendocument.text',
'oga': 'audio/ogg',
'ogg': 'audio/ogg',
'ogv': 'video/ogg',
'pdf': 'application/pdf',
'png': 'image/png',
'pot': 'text/plain',
'pps': 'application/vnd.ms-powerpoint',
'ppt': 'application/vnd.ms-powerpoint',
'pptx': 'application/vnd.ms-powerpoint',
'qt': 'video/quicktime',
'rmi': 'audio/mid',
'rss': 'text/rss+xml',
'snd': 'audio/basic',
'sxc': 'application/vnd.sun.xml.calc',
'sxw': 'application/vnd.sun.xml.writer',
'text': 'text/plain',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'txt': 'text/plain',
'vcf': 'text/directory',
'wav': 'audio/x-wav',
'wbmp': 'image/vnd.wap.wbmp',
'webm': 'video/webm',
'webp': 'image/webp',
'xls': 'application/vnd.ms-excel',
'xlsx': 'application/vnd.ms-excel',
'zip': 'application/zip'
}
EXTENSION_BLACKLIST = [
'ade',
'adp',
'bat',
'chm',
'cmd',
'com',
'cpl',
'exe',
'hta',
'ins',
'isp',
'jse',
'lib',
'mde',
'msc',
'msp',
'mst',
'pif',
'scr',
'sct',
'shb',
'sys',
'vb',
'vbe',
'vbs',
'vxd',
'wsc',
'wsf',
'wsh',
]
HEADER_WHITELIST = frozenset([
'Auto-Submitted',
'In-Reply-To',
'List-Id',
'List-Unsubscribe',
'On-Behalf-Of',
'References',
'Resent-Date',
'Resent-From',
'Resent-To',
])
def invalid_email_reason(email_address, field):
"""Determine reason why email is invalid.
Args:
email_address: Email to check.
field: Field that is invalid.
Returns:
String indicating invalid email reason if there is one,
else None.
"""
if email_address is None:
return 'None email address for %s.' % field
if isinstance(email_address, users.User):
email_address = email_address.email()
if not isinstance(email_address, basestring):
return 'Invalid email address type for %s.' % field
stripped_address = email_address.strip()
if not stripped_address:
return 'Empty email address for %s.' % field
return None
InvalidEmailReason = invalid_email_reason
def is_email_valid(email_address):
"""Determine if email is invalid.
Args:
email_address: Email to check.
Returns:
True if email is valid, else False.
"""
return invalid_email_reason(email_address, '') is None
IsEmailValid = is_email_valid
def check_email_valid(email_address, field):
"""Check that email is valid.
Args:
email_address: Email to check.
field: Field to check.
Raises:
InvalidEmailError if email_address is invalid.
"""
reason = invalid_email_reason(email_address, field)
if reason is not None:
raise InvalidEmailError(reason)
CheckEmailValid = check_email_valid
def is_ascii(string):
"""Return whether a string is in ascii."""
return all(ord(c) < 128 for c in string)
def invalid_headers_reason(headers):
"""Determine reason why headers is invalid.
Args:
headers: headers value to check.
Returns:
String indicating invalid headers reason if there is one,
else None.
"""
if headers is None:
return 'Headers dictionary was None.'
if not isinstance(headers, dict):
return 'Invalid type for headers. Should be a dictionary.'
for k, v in headers.iteritems():
if not isinstance(k, basestring):
return 'Header names should be strings.'
if not isinstance(v, basestring):
return 'Header values should be strings.'
if not is_ascii(k):
return 'Header name should be an ASCII string.'
if k.strip() not in HEADER_WHITELIST:
return 'Header "%s" is not allowed.' % k.strip()
def check_headers_valid(headers):
"""Check that headers is a valid dictionary for headers.
Args:
headers: the value to check for the headers.
Raises:
InvalidEmailError if headers is invalid.
"""
reason = invalid_headers_reason(headers)
if reason is not None:
raise InvalidEmailError(reason)
def _email_check_and_list(emails, field):
  """Check that a single email or a sequence of emails is valid.
  Args:
    emails: Single email or list of emails.
    field: Field name to report in error messages.
  Raises:
    InvalidEmailError if any email addresses are invalid.
  """
  if isinstance(emails, basestring):
    check_email_valid(emails, field)
  else:
    for address in iter(emails):
      check_email_valid(address, field)
def _email_sequence(emails):
"""Forces email to be sequenceable type.
Iterable values are returned as is. This function really just wraps the case
where there is a single email string.
Args:
emails: Emails (or email) to coerce to sequence.
Returns:
Single tuple with email in it if only one email string provided,
else returns emails as is.
"""
if isinstance(emails, basestring):
return emails,
return emails
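# Hedged sketch, not part of the original module: _email_sequence lets the
# rest of the API accept either one address or any iterable of addresses.
def _email_sequence_example():
  assert _email_sequence('a@example.com') == ('a@example.com',)
  addresses = ['a@example.com', 'b@example.com']
  assert _email_sequence(addresses) is addresses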
def _attachment_sequence(attachments):
"""Forces attachments to be sequenceable type.
Iterable values are returned as is. This function really just wraps the case
where there is a single attachment.
Args:
attachments: Attachments (or attachment) to coerce to sequence.
Returns:
Single tuple with attachment tuple in it if only one attachment provided,
else returns attachments as is.
"""
if len(attachments) == 2 and isinstance(attachments[0], basestring):
attachments = attachments,
for attachment in attachments:
if isinstance(attachment, Attachment):
yield attachment
else:
yield Attachment(*attachment)
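# Hedged sketch, not part of the original module: a bare (filename, payload)
# pair is wrapped into a one-element sequence, and plain tuples come back as
# Attachment objects (defined further down in this module).
def _attachment_sequence_example():
  single = list(_attachment_sequence(('notes.txt', 'hello')))
  many = list(_attachment_sequence([('a.txt', '1'), ('b.txt', '2')]))
  assert len(single) == 1 and single[0].filename == 'notes.txt'
  assert [a.filename for a in many] == ['a.txt', 'b.txt']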
def _parse_mime_message(mime_message):
"""Helper function converts a mime_message in to email.Message.Message.
Args:
mime_message: MIME Message, string or file containing mime message.
Returns:
Instance of email.Message.Message. Will return mime_message if already
an instance.
"""
if isinstance(mime_message, email.Message.Message):
return mime_message
elif isinstance(mime_message, basestring):
return email.message_from_string(mime_message)
else:
return email.message_from_file(mime_message)
def send_mail(sender,
to,
subject,
body,
make_sync_call=apiproxy_stub_map.MakeSyncCall,
**kw):
"""Sends mail on behalf of application.
Args:
sender: Sender email address as appears in the 'from' email line.
to: List of 'to' addresses or a single address.
subject: Message subject string.
body: Body of type text/plain.
make_sync_call: Function used to make sync call to API proxy.
kw: Keyword arguments compatible with EmailMessage keyword based
constructor.
Raises:
InvalidEmailError when invalid email address provided.
"""
kw['sender'] = sender
kw['to'] = to
kw['subject'] = subject
kw['body'] = body
message = EmailMessage(**kw)
message.send(make_sync_call)
SendMail = send_mail
def send_mail_to_admins(sender,
subject,
body,
make_sync_call=apiproxy_stub_map.MakeSyncCall,
**kw):
"""Sends mail to admins on behalf of application.
Args:
sender: Sender email address as appears in the 'from' email line.
subject: Message subject string.
body: Body of type text/plain.
make_sync_call: Function used to make sync call to API proxy.
kw: Keyword arguments compatible with EmailMessage keyword based
constructor.
Raises:
InvalidEmailError when invalid email address provided.
"""
kw['sender'] = sender
kw['subject'] = subject
kw['body'] = body
message = AdminEmailMessage(**kw)
message.send(make_sync_call)
SendMailToAdmins = send_mail_to_admins
def _GetMimeType(file_name):
"""Determine mime-type from file name.
Parses file name and determines mime-type based on extension map.
This method is not part of the public API and should not be used by
applications.
Args:
file_name: File to determine extension for.
Returns:
Mime-type associated with file extension.
Raises:
    InvalidAttachmentTypeError when the file name of an attachment has a
    disallowed extension.
"""
extension_index = file_name.rfind('.')
if extension_index == -1:
extension = ''
else:
extension = file_name[extension_index + 1:].lower()
if extension in EXTENSION_BLACKLIST:
raise InvalidAttachmentTypeError(
'Extension %s is not supported.' % extension)
mime_type = EXTENSION_MIME_MAP.get(extension, None)
if mime_type is None:
mime_type = 'application/octet-stream'
return mime_type
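# Hedged sketch, not part of the original module: extensions are lowercased
# before lookup, unknown extensions fall back to application/octet-stream,
# and blacklisted extensions are rejected outright.
def _get_mime_type_example():
  assert _GetMimeType('photo.JPG') == 'image/jpeg'
  assert _GetMimeType('data.unknownext') == 'application/octet-stream'
  try:
    _GetMimeType('setup.exe')
    raise AssertionError('blacklisted extension was accepted')
  except InvalidAttachmentTypeError:
    pass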
def _GuessCharset(text):
"""Guess the charset of a text.
Args:
text: a string (str) that is either a us-ascii string or a unicode that was
encoded in utf-8.
Returns:
Charset needed by the string, either 'us-ascii' or 'utf-8'.
"""
try:
text.decode('us-ascii')
return 'us-ascii'
except UnicodeDecodeError:
return 'utf-8'
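# Hedged sketch, not part of the original module: pure-ascii byte strings
# keep us-ascii, anything else is assumed to be utf-8 encoded bytes.
def _guess_charset_example():
  assert _GuessCharset('plain ascii') == 'us-ascii'
  assert _GuessCharset('caf\xc3\xa9') == 'utf-8'  # 'cafe' with a utf-8 e-acute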
def _I18nHeader(text):
"""Creates a header properly encoded even with unicode content.
Args:
text: a string (str) that is either a us-ascii string or a unicode that was
encoded in utf-8.
Returns:
email.header.Header
"""
charset = _GuessCharset(text)
return email.header.Header(text, charset, maxlinelen=1e3000)
def mail_message_to_mime_message(protocol_message):
"""Generate a MIMEMultitype message from protocol buffer.
Generates a complete MIME multi-part email object from a MailMessage
protocol buffer. The body fields are sent as individual alternatives
if they are both present, otherwise, only one body part is sent.
Multiple entry email fields such as 'To', 'Cc' and 'Bcc' are converted
to a list of comma separated email addresses.
Args:
protocol_message: Message PB to convert to MIMEMultitype.
Returns:
MIMEMultitype representing the provided MailMessage.
Raises:
    InvalidAttachmentTypeError when the file name of an attachment has a
    disallowed extension.
"""
parts = []
if protocol_message.has_textbody():
parts.append(MIMEText.MIMEText(
protocol_message.textbody(),
_charset=_GuessCharset(protocol_message.textbody())))
if protocol_message.has_htmlbody():
parts.append(MIMEText.MIMEText(
protocol_message.htmlbody(), _subtype='html',
_charset=_GuessCharset(protocol_message.htmlbody())))
if len(parts) == 1:
payload = parts
else:
payload = [MIMEMultipart.MIMEMultipart('alternative', _subparts=parts)]
result = MIMEMultipart.MIMEMultipart(_subparts=payload)
for attachment in protocol_message.attachment_list():
file_name = attachment.filename()
mime_type = _GetMimeType(file_name)
maintype, subtype = mime_type.split('/')
mime_attachment = MIMEBase.MIMEBase(maintype, subtype)
mime_attachment.add_header('Content-Disposition',
'attachment',
filename=attachment.filename())
mime_attachment.set_payload(attachment.data())
if attachment.has_contentid():
mime_attachment['content-id'] = attachment.contentid()
result.attach(mime_attachment)
if protocol_message.to_size():
result['To'] = _I18nHeader(', '.join(protocol_message.to_list()))
if protocol_message.cc_size():
result['Cc'] = _I18nHeader(', '.join(protocol_message.cc_list()))
if protocol_message.bcc_size():
result['Bcc'] = _I18nHeader(', '.join(protocol_message.bcc_list()))
result['From'] = _I18nHeader(protocol_message.sender())
result['Reply-To'] = _I18nHeader(protocol_message.replyto())
result['Subject'] = _I18nHeader(protocol_message.subject())
for header in protocol_message.header_list():
result[header.name()] = _I18nHeader(header.value())
return result
MailMessageToMIMEMessage = mail_message_to_mime_message
def _to_str(value):
"""Helper function to make sure unicode values converted to utf-8.
Args:
value: str or unicode to convert to utf-8.
Returns:
UTF-8 encoded str of value, otherwise value unchanged.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
return value
def _decode_and_join_header(header, separator=u' '):
"""Helper function to decode RFC2047 encoded headers.
Args:
header: RFC2047 encoded str (or just a plain old str) to convert to unicode.
separator: The separator to use when joining separately encoded pieces of
the header.
Returns:
unicode of decoded header or just header if it was None or ''.
"""
if not header:
return header
return separator.join(unicode(s, c or 'us-ascii')
for s, c in email.header.decode_header(header))
def _decode_address_list_field(address_list):
"""Helper function to decode (potentially RFC2047 encoded) address lists.
Args:
address_list: a single str header, or list of str headers.
Returns:
unicode of decoded header or list of str headers.
"""
if not address_list:
return None
if len(address_list) == 1:
return _decode_and_join_header(address_list[0])
else:
return map(_decode_and_join_header, address_list)
def wrapping(wrapped):
def wrapping_wrapper(wrapper):
try:
wrapper.__wrapped__ = wrapped
wrapper.__name__ = wrapped.__name__
wrapper.__doc__ = wrapped.__doc__
wrapper.__dict__.update(wrapped.__dict__)
except Exception:
pass
return wrapper
return wrapping_wrapper
def _positional(max_pos_args):
"""A decorator to declare that only the first N arguments may be positional.
Note that for methods, n includes 'self'.
"""
def positional_decorator(wrapped):
@wrapping(wrapped)
def positional_wrapper(*args, **kwds):
if len(args) > max_pos_args:
plural_s = ''
if max_pos_args != 1:
plural_s = 's'
raise TypeError(
'%s() takes at most %d positional argument%s (%d given)' %
(wrapped.__name__, max_pos_args, plural_s, len(args)))
return wrapped(*args, **kwds)
return positional_wrapper
return positional_decorator
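# Hedged sketch, not part of the original module: _positional(1) allows only
# the first argument to be passed positionally; the rest must be keywords.
@_positional(1)
def _positional_example(filename, content_id=None):
  return filename, content_id
# _positional_example('f.txt')                    -> ('f.txt', None)
# _positional_example('f.txt', content_id='<x>')  -> ('f.txt', '<x>')
# _positional_example('f.txt', '<x>')             -> raises TypeError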
class Attachment(object):
"""Attachment object.
An Attachment object is largely interchangeable with a (filename, payload)
tuple.
Note that the behavior is a bit asymmetric with respect to unpacking and
equality comparison. An Attachment object without a content ID will be
equivalent to a (filename, payload) tuple. An Attachment with a content ID
will unpack to a (filename, payload) tuple, but will compare unequally to
that tuple.
Thus, the following comparison will succeed:
attachment = mail.Attachment('foo.jpg', 'data')
filename, payload = attachment
    attachment == (filename, payload)
...while the following will fail:
attachment = mail.Attachment('foo.jpg', 'data', content_id='<foo>')
filename, payload = attachment
    attachment == (filename, payload)
The following comparison will pass though:
attachment = mail.Attachment('foo.jpg', 'data', content_id='<foo>')
attachment == (attachment.filename,
attachment.payload,
attachment.content_id)
Attributes:
filename: The name of the attachment.
payload: The attachment data.
content_id: Optional. The content-id for this attachment. Keyword-only.
"""
@_positional(3)
def __init__(self, filename, payload, content_id=None):
"""Constructor.
Arguments:
filename: The name of the attachment
payload: The attachment data.
content_id: Optional. The content-id for this attachment.
"""
self.filename = filename
self.payload = payload
self.content_id = content_id
def __eq__(self, other):
self_tuple = (self.filename, self.payload, self.content_id)
if isinstance(other, Attachment):
other_tuple = (other.filename, other.payload, other.content_id)
elif not hasattr(other, '__len__'):
return NotImplemented
elif len(other) == 2:
other_tuple = other + (None,)
elif len(other) == 3:
other_tuple = other
else:
return NotImplemented
return self_tuple == other_tuple
def __hash__(self):
if self.content_id:
return hash((self.filename, self.payload, self.content_id))
else:
return hash((self.filename, self.payload))
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter((self.filename, self.payload))
def __getitem__(self, i):
return tuple(iter(self))[i]
def __contains__(self, val):
return val in (self.filename, self.payload)
def __len__(self):
return 2
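# Hedged sketch, not part of the original module: the asymmetry described in
# the Attachment docstring, shown concretely.
def _attachment_tuple_example():
  plain = Attachment('foo.jpg', 'data')
  assert plain == ('foo.jpg', 'data')
  tagged = Attachment('foo.jpg', 'data', content_id='<foo>')
  filename, payload = tagged          # a tagged attachment still unpacks
  assert (filename, payload) == ('foo.jpg', 'data')
  assert tagged != ('foo.jpg', 'data')
  assert tagged == ('foo.jpg', 'data', '<foo>')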
class EncodedPayload(object):
"""Wrapper for a payload that contains encoding information.
When an email is received, it is usually encoded using a certain
character set, and then possibly further encoded using a transfer
encoding in that character set. Most of the times, it is possible
to decode the encoded payload as is, however, in the case where it
is not, the encoded payload and the original encoding information
must be preserved.
Attributes:
payload: The original encoded payload.
charset: The character set of the encoded payload. None means use
default character set.
encoding: The transfer encoding of the encoded payload. None means
content not encoded.
"""
def __init__(self, payload, charset=None, encoding=None):
"""Constructor.
Args:
payload: Maps to attribute of the same name.
charset: Maps to attribute of the same name.
encoding: Maps to attribute of the same name.
"""
self.payload = payload
self.charset = charset
self.encoding = encoding
def decode(self):
"""Attempt to decode the encoded data.
    Attempt to use Python's codec library to decode the payload. All
    exceptions are passed back to the caller.
Returns:
Binary or unicode version of payload content.
"""
payload = self.payload
if self.encoding and self.encoding.lower() != '7bit':
try:
payload = payload.decode(self.encoding)
except LookupError:
raise UnknownEncodingError('Unknown decoding %s.' % self.encoding)
except (Exception, Error), e:
raise PayloadEncodingError('Could not decode payload: %s' % e)
if self.charset and str(self.charset).lower() != '7bit':
try:
payload = payload.decode(str(self.charset))
except LookupError:
raise UnknownCharsetError('Unknown charset %s.' % self.charset)
except (Exception, Error), e:
        raise PayloadEncodingError('Could not decode characters: %s' % e)
return payload
def __eq__(self, other):
"""Equality operator.
Args:
other: The other EncodedPayload object to compare with. Comparison
with other object types are not implemented.
Returns:
      True if payload and encodings are equal, else False.
"""
if isinstance(other, EncodedPayload):
return (self.payload == other.payload and
self.charset == other.charset and
self.encoding == other.encoding)
else:
return NotImplemented
def __hash__(self):
"""Hash an EncodedPayload."""
return hash((self.payload, self.charset, self.encoding))
def copy_to(self, mime_message):
"""Copy contents to MIME message payload.
If no content transfer encoding is specified, and the character set does
not equal the over-all message encoding, the payload will be base64
encoded.
Args:
mime_message: Message instance to receive new payload.
"""
if self.encoding:
mime_message['content-transfer-encoding'] = self.encoding
mime_message.set_payload(self.payload, self.charset)
def to_mime_message(self):
"""Convert to MIME message.
Returns:
MIME message instance of payload.
"""
mime_message = email.Message.Message()
self.copy_to(mime_message)
return mime_message
def __str__(self):
"""String representation of encoded message.
Returns:
MIME encoded representation of encoded payload as an independent message.
"""
return str(self.to_mime_message())
def __repr__(self):
"""Basic representation of encoded payload.
Returns:
Payload itself is represented by its hash value.
"""
result = '<EncodedPayload payload=#%d' % hash(self.payload)
if self.charset:
result += ' charset=%s' % self.charset
if self.encoding:
result += ' encoding=%s' % self.encoding
return result + '>'
class _EmailMessageBase(object):
"""Base class for email API service objects.
Subclasses must define a class variable called _API_CALL with the name
of its underlying mail sending API call.
"""
PROPERTIES = set([
'sender',
'reply_to',
'subject',
'body',
'html',
'attachments',
])
ALLOWED_EMPTY_PROPERTIES = set([
'subject',
'body'
])
PROPERTIES.update(('to', 'cc', 'bcc'))
def __init__(self, mime_message=None, **kw):
"""Initialize Email message.
Creates new MailMessage protocol buffer and initializes it with any
keyword arguments.
Args:
mime_message: MIME message to initialize from. If instance of
email.Message.Message will take ownership as original message.
kw: List of keyword properties as defined by PROPERTIES.
"""
if mime_message:
mime_message = _parse_mime_message(mime_message)
self.update_from_mime_message(mime_message)
self.__original = mime_message
self.initialize(**kw)
@property
def original(self):
"""Get original MIME message from which values were set."""
return self.__original
def initialize(self, **kw):
"""Keyword initialization.
Used to set all fields of the email message using keyword arguments.
Args:
kw: List of keyword properties as defined by PROPERTIES.
"""
for name, value in kw.iteritems():
setattr(self, name, value)
def Initialize(self, **kw):
self.initialize(**kw)
def check_initialized(self):
"""Check if EmailMessage is properly initialized.
Test used to determine if EmailMessage meets basic requirements
for being used with the mail API. This means that the following
fields must be set or have at least one value in the case of
multi value fields:
- Subject must be set.
- A recipient must be specified.
- Must contain a body.
- All bodies and attachments must decode properly.
This check does not include determining if the sender is actually
authorized to send email for the application.
Raises:
Appropriate exception for initialization failure.
InvalidAttachmentTypeError: Use of incorrect attachment type.
MissingRecipientsError: No recipients specified in to, cc or bcc.
MissingSenderError: No sender specified.
MissingSubjectError: Subject is not specified.
MissingBodyError: No body specified.
PayloadEncodingError: Payload is not properly encoded.
UnknownEncodingError: Payload has unknown encoding.
UnknownCharsetError: Payload has unknown character set.
"""
if not hasattr(self, 'sender'):
raise MissingSenderError()
found_body = False
try:
body = self.body
except AttributeError:
pass
else:
if isinstance(body, EncodedPayload):
body.decode()
found_body = True
try:
html = self.html
except AttributeError:
pass
else:
if isinstance(html, EncodedPayload):
html.decode()
found_body = True
if hasattr(self, 'attachments'):
for attachment in _attachment_sequence(self.attachments):
_GetMimeType(attachment.filename)
if isinstance(attachment.payload, EncodedPayload):
attachment.payload.decode()
def CheckInitialized(self):
self.check_initialized()
def is_initialized(self):
"""Determine if EmailMessage is properly initialized.
Returns:
      True if message is properly initialized, otherwise False.
"""
try:
self.check_initialized()
return True
except Error:
return False
def IsInitialized(self):
return self.is_initialized()
def ToProto(self):
"""Convert mail message to protocol message.
Unicode strings are converted to UTF-8 for all fields.
    This method is overridden by EmailMessage to support the sender fields.
Returns:
MailMessage protocol version of mail message.
Raises:
      Passes through decoding errors that occur when decoding
      EncodedPayload objects.
"""
self.check_initialized()
message = mail_service_pb.MailMessage()
message.set_sender(_to_str(self.sender))
if hasattr(self, 'reply_to'):
message.set_replyto(_to_str(self.reply_to))
if hasattr(self, 'subject'):
message.set_subject(_to_str(self.subject))
else:
message.set_subject('')
if hasattr(self, 'body'):
body = self.body
if isinstance(body, EncodedPayload):
body = body.decode()
message.set_textbody(_to_str(body))
if hasattr(self, 'html'):
html = self.html
if isinstance(html, EncodedPayload):
html = html.decode()
message.set_htmlbody(_to_str(html))
if hasattr(self, 'attachments'):
for attachment in _attachment_sequence(self.attachments):
if isinstance(attachment.payload, EncodedPayload):
attachment.payload = attachment.payload.decode()
protoattachment = message.add_attachment()
protoattachment.set_filename(_to_str(attachment.filename))
protoattachment.set_data(_to_str(attachment.payload))
if attachment.content_id:
protoattachment.set_contentid(attachment.content_id)
return message
def to_mime_message(self):
"""Generate a MIMEMultitype message from EmailMessage.
    Calls MailMessageToMIMEMessage after converting self to protocol
    buffer. Protocol buffer is better at handling corner cases
    than EmailMessage class.
Returns:
MIMEMultitype representing the provided MailMessage.
Raises:
Appropriate exception for initialization failure.
InvalidAttachmentTypeError: Use of incorrect attachment type.
MissingSenderError: No sender specified.
MissingSubjectError: Subject is not specified.
MissingBodyError: No body specified.
"""
return mail_message_to_mime_message(self.ToProto())
def ToMIMEMessage(self):
return self.to_mime_message()
def send(self, make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Send email message.
Send properly initialized email message via email API.
Args:
make_sync_call: Method which will make synchronous call to api proxy.
Raises:
Errors defined in this file above.
"""
message = self.ToProto()
response = api_base_pb.VoidProto()
try:
make_sync_call('mail', self._API_CALL, message, response)
except apiproxy_errors.ApplicationError, e:
if e.application_error in ERROR_MAP:
raise ERROR_MAP[e.application_error](e.error_detail)
raise e
def Send(self, *args, **kwds):
self.send(*args, **kwds)
def _check_attachment(self, attachment):
if not (isinstance(attachment.filename, basestring) or
isinstance(attachment.payload, basestring)):
raise TypeError()
def _check_attachments(self, attachments):
"""Checks values going to attachment field.
Mainly used to check type safety of the values. Each value of the list
must be a pair of the form (file_name, data), and both values a string
type.
Args:
attachments: Collection of attachment tuples.
Raises:
TypeError if values are not string type.
"""
attachments = _attachment_sequence(attachments)
for attachment in attachments:
self._check_attachment(attachment)
def __setattr__(self, attr, value):
"""Property setting access control.
Controls write access to email fields.
Args:
attr: Attribute to access.
value: New value for field.
Raises:
ValueError: If provided with an empty field.
AttributeError: If not an allowed assignment field.
"""
if not attr.startswith('_EmailMessageBase'):
if attr in ['sender', 'reply_to']:
check_email_valid(value, attr)
if not value and not attr in self.ALLOWED_EMPTY_PROPERTIES:
raise ValueError('May not set empty value for \'%s\'' % attr)
if attr not in self.PROPERTIES:
raise AttributeError('\'EmailMessage\' has no attribute \'%s\'' % attr)
if attr == 'attachments':
self._check_attachments(value)
super(_EmailMessageBase, self).__setattr__(attr, value)
def _add_body(self, content_type, payload):
"""Add body to email from payload.
Will overwrite any existing default plain or html body.
Args:
content_type: Content-type of body.
payload: Payload to store body as.
"""
if content_type == 'text/plain':
self.body = payload
elif content_type == 'text/html':
self.html = payload
def _update_payload(self, mime_message):
"""Update payload of mail message from mime_message.
    This function works recursively when it receives a multipart body.
If it receives a non-multi mime object, it will determine whether or
not it is an attachment by whether it has a filename or not. Attachments
and bodies are then wrapped in EncodedPayload with the correct charsets and
encodings.
Args:
mime_message: A Message MIME email object.
"""
payload = mime_message.get_payload()
if payload:
if mime_message.get_content_maintype() == 'multipart':
for alternative in payload:
self._update_payload(alternative)
else:
filename = mime_message.get_param('filename',
header='content-disposition')
if filename:
filename = email.utils.collapse_rfc2231_value(filename)
if not filename:
filename = mime_message.get_param('name')
payload = EncodedPayload(payload,
(mime_message.get_content_charset() or
mime_message.get_charset()),
mime_message['content-transfer-encoding'])
if 'content-id' in mime_message:
attachment = Attachment(filename,
payload,
content_id=mime_message['content-id'])
else:
attachment = Attachment(filename, payload)
if filename:
try:
attachments = self.attachments
except AttributeError:
self.attachments = [attachment]
else:
if isinstance(attachments[0], basestring):
self.attachments = [attachments]
attachments = self.attachments
attachments.append(attachment)
else:
self._add_body(mime_message.get_content_type(), payload)
def update_from_mime_message(self, mime_message):
"""Copy information from a mime message.
Set information of instance to values of mime message. This method
will only copy values that it finds. Any missing values will not
be copied, nor will they overwrite old values with blank values.
This object is not guaranteed to be initialized after this call.
Args:
mime_message: email.Message instance to copy information from.
Returns:
MIME Message instance of mime_message argument.
"""
mime_message = _parse_mime_message(mime_message)
sender = _decode_and_join_header(mime_message['from'])
if sender:
self.sender = sender
reply_to = _decode_and_join_header(mime_message['reply-to'])
if reply_to:
self.reply_to = reply_to
subject = _decode_and_join_header(mime_message['subject'], separator=u'')
if subject:
self.subject = subject
self._update_payload(mime_message)
def bodies(self, content_type=None):
"""Iterate over all bodies.
Yields:
Tuple (content_type, payload) for html and body in that order.
"""
if (not content_type or
content_type == 'text' or
content_type == 'text/html'):
try:
yield 'text/html', self.html
except AttributeError:
pass
if (not content_type or
content_type == 'text' or
content_type == 'text/plain'):
try:
yield 'text/plain', self.body
except AttributeError:
pass
class EmailMessage(_EmailMessageBase):
"""Main interface to email API service.
This class is used to programmatically build an email message to send via
the Mail API. The usage is to construct an instance, populate its fields
and call Send().
Example Usage:
An EmailMessage can be built completely by the constructor.
EmailMessage(sender='sender@nowhere.com',
to='recipient@nowhere.com',
subject='a subject',
body='This is an email to you').Send()
It might be desirable for an application to build an email in different
places throughout the code. For this, EmailMessage is mutable.
message = EmailMessage()
message.sender = 'sender@nowhere.com'
message.to = ['recipient1@nowhere.com', 'recipient2@nowhere.com']
message.subject = 'a subject'
    message.body = 'This is an email to you'
message.check_initialized()
message.send()
"""
_API_CALL = 'Send'
PROPERTIES = set(_EmailMessageBase.PROPERTIES | set(('headers',)))
def check_initialized(self):
"""Provide additional checks to ensure recipients have been specified.
Raises:
      MissingRecipientsError when no recipients specified in to, cc or bcc.
"""
if (not hasattr(self, 'to') and
not hasattr(self, 'cc') and
not hasattr(self, 'bcc')):
raise MissingRecipientsError()
super(EmailMessage, self).check_initialized()
def CheckInitialized(self):
self.check_initialized()
def ToProto(self):
"""Does addition conversion of recipient fields to protocol buffer.
Returns:
MailMessage protocol version of mail message including sender fields.
"""
message = super(EmailMessage, self).ToProto()
for attribute, adder in (('to', message.add_to),
('cc', message.add_cc),
('bcc', message.add_bcc)):
if hasattr(self, attribute):
for address in _email_sequence(getattr(self, attribute)):
adder(_to_str(address))
for name, value in getattr(self, 'headers', {}).iteritems():
header = message.add_header()
header.set_name(name)
header.set_value(_to_str(value))
return message
def __setattr__(self, attr, value):
"""Provides additional checks on recipient fields."""
if attr in ['to', 'cc', 'bcc']:
if isinstance(value, basestring):
if value == '' and getattr(self, 'ALLOW_BLANK_EMAIL', False):
return
check_email_valid(value, attr)
else:
for address in value:
check_email_valid(address, attr)
elif attr == 'headers':
check_headers_valid(value)
super(EmailMessage, self).__setattr__(attr, value)
def update_from_mime_message(self, mime_message):
"""Copy information from a mime message.
Update fields for recipients.
Args:
mime_message: email.Message instance to copy information from.
"""
mime_message = _parse_mime_message(mime_message)
super(EmailMessage, self).update_from_mime_message(mime_message)
to = _decode_address_list_field(mime_message.get_all('to'))
if to:
self.to = to
cc = _decode_address_list_field(mime_message.get_all('cc'))
if cc:
self.cc = cc
bcc = _decode_address_list_field(mime_message.get_all('bcc'))
if bcc:
self.bcc = bcc
class AdminEmailMessage(_EmailMessageBase):
"""Interface to sending email messages to all admins via the amil API.
This class is used to programmatically build an admin email message to send
via the Mail API. The usage is to construct an instance, populate its fields
and call Send().
Unlike the normal email message, addresses in the recipient fields are
ignored and not used for sending.
Example Usage:
An AdminEmailMessage can be built completely by the constructor.
AdminEmailMessage(sender='sender@nowhere.com',
subject='a subject',
body='This is an email to you').Send()
It might be desirable for an application to build an admin email in
different places throughout the code. For this, AdminEmailMessage is
mutable.
message = AdminEmailMessage()
message.sender = 'sender@nowhere.com'
message.subject = 'a subject'
    message.body = 'This is an email to you'
message.check_initialized()
message.send()
"""
_API_CALL = 'SendToAdmins'
__UNUSED_PROPERTIES = set(('to', 'cc', 'bcc'))
def __setattr__(self, attr, value):
if attr in self.__UNUSED_PROPERTIES:
logging.warning('\'%s\' is not a valid property to set '
'for AdminEmailMessage. It is unused.', attr)
super(AdminEmailMessage, self).__setattr__(attr, value)
class InboundEmailMessage(EmailMessage):
"""Parsed email object as recevied from external source.
Has a date field and can store any number of additional bodies. These
additional attributes make the email more flexible as required for
incoming mail, where the developer has less control over the content.
Example Usage:
# Read mail message from CGI input.
message = InboundEmailMessage(sys.stdin.read())
logging.info('Received email message from %s at %s',
message.sender,
message.date)
enriched_body = list(message.bodies('text/enriched'))[0]
... Do something with body ...
"""
__HEADER_PROPERTIES = {'date': 'date',
'message_id': 'message-id',
}
PROPERTIES = frozenset(_EmailMessageBase.PROPERTIES |
set(('alternate_bodies',)) |
set(__HEADER_PROPERTIES.iterkeys()))
ALLOW_BLANK_EMAIL = True
def update_from_mime_message(self, mime_message):
"""Update values from MIME message.
Copies over date values.
Args:
mime_message: email.Message instance to copy information from.
"""
mime_message = _parse_mime_message(mime_message)
super(InboundEmailMessage, self).update_from_mime_message(mime_message)
for property, header in InboundEmailMessage.__HEADER_PROPERTIES.iteritems():
value = mime_message[header]
if value:
setattr(self, property, value)
def _add_body(self, content_type, payload):
"""Add body to inbound message.
    Method is overridden to handle incoming messages that have more than one
    plain or html body, or that have any unidentified bodies.
This method will not overwrite existing html and body values. This means
that when updating, the text and html bodies that are first in the MIME
document order are assigned to the body and html properties.
Args:
content_type: Content-type of additional body.
payload: Content of additional body.
"""
if (content_type == 'text/plain' and not hasattr(self, 'body') or
content_type == 'text/html' and not hasattr(self, 'html')):
super(InboundEmailMessage, self)._add_body(content_type, payload)
else:
try:
alternate_bodies = self.alternate_bodies
except AttributeError:
alternate_bodies = self.alternate_bodies = [(content_type, payload)]
else:
alternate_bodies.append((content_type, payload))
def bodies(self, content_type=None):
"""Iterate over all bodies.
Args:
content_type: Content type to filter on. Allows selection of only
specific types of content. Can be just the base type of the content
type. For example:
content_type = 'text/html' # Matches only HTML content.
content_type = 'text' # Matches text of any kind.
Yields:
Tuple (content_type, payload) for all bodies of message, including body,
html and all alternate_bodies in that order.
"""
main_bodies = super(InboundEmailMessage, self).bodies(content_type)
for payload_type, payload in main_bodies:
yield payload_type, payload
partial_type = bool(content_type and content_type.find('/') < 0)
try:
for payload_type, payload in self.alternate_bodies:
if content_type:
if partial_type:
match_type = payload_type.split('/')[0]
else:
match_type = payload_type
match = match_type == content_type
else:
match = True
if match:
yield payload_type, payload
except AttributeError:
pass
def to_mime_message(self):
"""Convert to MIME message.
Adds additional headers from inbound email.
Returns:
MIME message instance of payload.
"""
mime_message = super(InboundEmailMessage, self).to_mime_message()
for property, header in InboundEmailMessage.__HEADER_PROPERTIES.iteritems():
try:
mime_message[header] = getattr(self, property)
except AttributeError:
pass
return mime_message
Parser.Parser
| |
from __future__ import absolute_import, unicode_literals
from datetime import date
import unittest
import warnings
from django import forms
from django.core.exceptions import FieldError, ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms.models import (modelform_factory, ModelChoiceField,
fields_for_model, construct_instance, ModelFormMetaclass)
from django.utils import six
from django.test import TestCase
from .models import (Person, RealPerson, Triple, FilePathModel, Article,
Publication, CustomFF, Author, Author1, Homepage, Document, Edition)
class ModelMultipleChoiceFieldTests(TestCase):
def test_model_multiple_choice_number_of_queries(self):
"""
Test that ModelMultipleChoiceField does O(1) queries instead of
O(n) (#10156).
"""
persons = [Person.objects.create(name="Person %s" % i) for i in range(30)]
f = forms.ModelMultipleChoiceField(queryset=Person.objects.all())
self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
def test_model_multiple_choice_run_validators(self):
"""
Test that ModelMultipleChoiceField run given validators (#14144).
"""
for i in range(30):
Person.objects.create(name="Person %s" % i)
self._validator_run = False
def my_validator(value):
self._validator_run = True
f = forms.ModelMultipleChoiceField(queryset=Person.objects.all(),
validators=[my_validator])
f.clean([p.pk for p in Person.objects.all()[8:9]])
self.assertTrue(self._validator_run)
class TripleForm(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
class UniqueTogetherTests(TestCase):
def test_multiple_field_unique_together(self):
"""
When the same field is involved in multiple unique_together
constraints, we need to make sure we don't remove the data for it
before doing all the validation checking (not just failing after
the first one).
"""
Triple.objects.create(left=1, middle=2, right=3)
form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
self.assertFalse(form.is_valid())
form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
self.assertTrue(form.is_valid())
class TripleFormWithCleanOverride(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
def clean(self):
if not self.cleaned_data['left'] == self.cleaned_data['right']:
raise forms.ValidationError('Left and right should be equal')
return self.cleaned_data
class OverrideCleanTests(TestCase):
def test_override_clean(self):
"""
Regression for #12596: Calling super from ModelForm.clean() should be
optional.
"""
form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
self.assertTrue(form.is_valid())
# form.instance.left will be None if the instance was not constructed
# by form.full_clean().
self.assertEqual(form.instance.left, 1)
class PartiallyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = ('left', 'right',)
fields = '__all__'
class FullyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = '__all__'
fields = '__all__'
class LocalizedModelFormTest(TestCase):
def test_model_form_applies_localize_to_some_fields(self):
f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertFalse(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_applies_localize_to_all_fields(self):
f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertTrue(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_refuses_arbitrary_string(self):
with self.assertRaises(TypeError):
class BrokenLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "foo"
# Regression test for #12960.
# Make sure the cleaned_data returned from ModelForm.clean() is applied to the
# model instance.
class PublicationForm(forms.ModelForm):
def clean(self):
self.cleaned_data['title'] = self.cleaned_data['title'].upper()
return self.cleaned_data
class Meta:
model = Publication
fields = '__all__'
class ModelFormCleanTest(TestCase):
def test_model_form_clean_applies_to_model(self):
data = {'title': 'test', 'date_published': '2010-2-25'}
form = PublicationForm(data)
publication = form.save()
self.assertEqual(publication.title, 'TEST')
class FPForm(forms.ModelForm):
class Meta:
model = FilePathModel
fields = '__all__'
class FilePathFieldTests(TestCase):
def test_file_path_field_blank(self):
"""
Regression test for #8842: FilePathField(blank=True)
"""
form = FPForm()
names = [p[1] for p in form['path'].field.choices]
names.sort()
self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'tests.py'])
class ManyToManyCallableInitialTests(TestCase):
def test_callable(self):
"Regression for #10349: A callable can be provided as the initial value for an m2m field"
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == 'publications':
kwargs['initial'] = lambda: Publication.objects.all().order_by('date_published')[:2]
return db_field.formfield(**kwargs)
# Set up some Publications to use as data
book1 = Publication.objects.create(title="First Book", date_published=date(2007,1,1))
book2 = Publication.objects.create(title="Second Book", date_published=date(2008,1,1))
book3 = Publication.objects.create(title="Third Book", date_published=date(2009,1,1))
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(Article, fields="__all__",
formfield_callback=formfield_for_dbfield)
form = ModelForm()
self.assertHTMLEqual(form.as_ul(), """<li><label for="id_headline">Headline:</label> <input id="id_headline" type="text" name="headline" maxlength="100" /></li>
<li><label for="id_publications">Publications:</label> <select multiple="multiple" name="publications" id="id_publications">
<option value="%d" selected="selected">First Book</option>
<option value="%d" selected="selected">Second Book</option>
<option value="%d">Third Book</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>"""
% (book1.pk, book2.pk, book3.pk))
class CFFForm(forms.ModelForm):
class Meta:
model = CustomFF
fields = '__all__'
class CustomFieldSaveTests(TestCase):
def test_save(self):
"Regression for #11149: save_form_data should be called only once"
# It's enough that the form saves without error -- the custom save routine will
# generate an AssertionError if it is called more than once during save.
        form = CFFForm(data={'f': None})
form.save()
class ModelChoiceIteratorTests(TestCase):
def test_len(self):
class Form(forms.ModelForm):
class Meta:
model = Article
fields = ["publications"]
Publication.objects.create(title="Pravda",
date_published=date(1991, 8, 22))
f = Form()
self.assertEqual(len(f.fields["publications"].choices), 1)
class RealPersonForm(forms.ModelForm):
class Meta:
model = RealPerson
fields = '__all__'
class CustomModelFormSaveMethod(TestCase):
def test_string_message(self):
data = {'name': 'anonymous'}
form = RealPersonForm(data)
self.assertEqual(form.is_valid(), False)
self.assertEqual(form.errors['__all__'], ['Please specify a real name.'])
class ModelClassTests(TestCase):
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
self.assertRaises(ValueError, NoModelModelForm)
class OneToOneFieldTests(TestCase):
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda",
date_published=date(1991, 8, 22))
author = Author.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication':'', 'full_name':'John Doe'}, instance=author)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['publication'], None)
author = form.save()
        # The author object returned from the form still retains the original publication
        # object; that's why we need to retrieve it from the database again.
new_author = Author.objects.get(pk=author.pk)
self.assertEqual(new_author.publication, None)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda",
date_published=date(1991, 8, 22))
author = Author1.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication':'', 'full_name':'John Doe'}, instance=author)
self.assertTrue(not form.is_valid())
class ModelChoiceForm(forms.Form):
person = ModelChoiceField(Person.objects.all())
class TestTicket11183(TestCase):
def test_11183(self):
form1 = ModelChoiceForm()
field1 = form1.fields['person']
# To allow the widget to change the queryset of field1.widget.choices correctly,
# without affecting other forms, the following must hold:
self.assertTrue(field1 is not ModelChoiceForm.base_fields['person'])
self.assertTrue(field1.widget.choices.field is field1)
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
class URLFieldTests(TestCase):
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())
def test_http_prefixing(self):
"If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)"
form = HomepageForm({'url': 'example.com'})
form.is_valid()
# self.assertTrue(form.is_valid())
# self.assertEqual(form.cleaned_data['url'], 'http://example.com/')
form = HomepageForm({'url': 'example.com/test'})
form.is_valid()
# self.assertTrue(form.is_valid())
# self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class FormFieldCallbackTests(TestCase):
def test_baseform_with_widgets_in_meta(self):
"""Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
Form = modelform_factory(Person, form=BaseForm)
self.assertTrue(Form.base_fields['name'].widget is widget)
def test_factory_with_widget_argument(self):
""" Regression for #15315: modelform_factory should accept widgets
argument
"""
widget = forms.Textarea()
# Without a widget should not set the widget to textarea
Form = modelform_factory(Person, fields="__all__")
self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widget should set the widget to textarea
Form = modelform_factory(Person, fields="__all__", widgets={'name':widget})
self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
def test_custom_callback(self):
"""Test that a custom formfield_callback is used if provided"""
callback_args = []
def callback(db_field, **kwargs):
callback_args.append((db_field, kwargs))
return db_field.formfield(**kwargs)
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
_ = modelform_factory(Person, form=BaseForm,
formfield_callback=callback)
id_field, name_field = Person._meta.fields
self.assertEqual(callback_args,
[(id_field, {}), (name_field, {'widget': widget})])
def test_bad_callback(self):
        # A bad callback provided by the user still raises an error
self.assertRaises(TypeError, modelform_factory, Person, fields="__all__",
formfield_callback='not a function or callable')
class InvalidFieldAndFactory(TestCase):
""" Tests for #11905 """
def test_extra_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'no-field')
except FieldError as e:
# Make sure the exception contains some reference to the
# field responsible for the problem.
self.assertTrue('no-field' in e.args[0])
else:
self.fail('Invalid "no-field" field not caught')
def test_extra_declared_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'age')
except FieldError:
self.fail('Declarative field raised FieldError incorrectly')
def test_extra_field_modelform_factory(self):
self.assertRaises(FieldError, modelform_factory,
Person, fields=['no-field', 'name'])
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
class FileFieldTests(unittest.TestCase):
def test_clean_false(self):
"""
If the ``clean`` method on a non-required FileField receives False as
the data (meaning clear the field value), it returns False, regardless
of the value of ``initial``.
"""
f = forms.FileField(required=False)
self.assertEqual(f.clean(False), False)
self.assertEqual(f.clean(False, 'initial'), False)
def test_clean_false_required(self):
"""
If the ``clean`` method on a required FileField receives False as the
data, it has the same effect as None: initial is returned if non-empty,
otherwise the validation catches the lack of a required value.
"""
f = forms.FileField(required=True)
self.assertEqual(f.clean(False, 'initial'), 'initial')
self.assertRaises(ValidationError, f.clean, False)
def test_full_clear(self):
"""
Integration happy-path test that a model FileField can actually be set
and cleared via a ModelForm.
"""
form = DocumentForm()
self.assertTrue('name="myfile"' in six.text_type(form))
self.assertTrue('myfile-clear' not in six.text_type(form))
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
self.assertEqual(doc.myfile.name, 'something.txt')
form = DocumentForm(instance=doc)
self.assertTrue('myfile-clear' in six.text_type(form))
form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
doc = form.save(commit=False)
self.assertEqual(bool(doc.myfile), False)
def test_clear_and_file_contradiction(self):
"""
If the user submits a new file upload AND checks the clear checkbox,
they get a validation error, and the bound redisplay of the form still
includes the current file and the clear checkbox.
"""
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
form = DocumentForm(instance=doc,
files={'myfile': SimpleUploadedFile('something.txt', b'content')},
data={'myfile-clear': 'true'})
self.assertTrue(not form.is_valid())
self.assertEqual(form.errors['myfile'],
['Please either submit a file or check the clear checkbox, not both.'])
rendered = six.text_type(form)
self.assertTrue('something.txt' in rendered)
self.assertTrue('myfile-clear' in rendered)
class EditionForm(forms.ModelForm):
author = forms.ModelChoiceField(queryset=Person.objects.all())
publication = forms.ModelChoiceField(queryset=Publication.objects.all())
edition = forms.IntegerField()
isbn = forms.CharField(max_length=13)
class Meta:
model = Edition
fields = '__all__'
class UniqueErrorsTests(TestCase):
def setUp(self):
self.author1 = Person.objects.create(name='Author #1')
self.author2 = Person.objects.create(name='Author #2')
self.pub1 = Publication.objects.create(title='Pub #1', date_published=date(2000, 10, 31))
self.pub2 = Publication.objects.create(title='Pub #2', date_published=date(2004, 1, 5))
form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub1.pk, 'edition': 1, 'isbn': '9783161484100'})
form.save()
def test_unique_error_message(self):
form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub2.pk, 'edition': 1, 'isbn': '9783161484100'})
self.assertEqual(form.errors, {'isbn': ['Edition with this Isbn already exists.']})
def test_unique_together_error_message(self):
form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub1.pk, 'edition': 2, 'isbn': '9783161489999'})
self.assertEqual(form.errors, {'__all__': ['Edition with this Author and Publication already exists.']})
form = EditionForm(data={'author': self.author2.pk, 'publication': self.pub1.pk, 'edition': 1, 'isbn': '9783161487777'})
self.assertEqual(form.errors, {'__all__': ['Edition with this Publication and Edition already exists.']})
class EmptyFieldsTestCase(TestCase):
"Tests for fields=() cases as reported in #14119"
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
def test_empty_fields_to_fields_for_model(self):
"An argument of fields=() to fields_for_model should return an empty dictionary"
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"No fields on a ModelForm should actually result in no fields"
form = self.EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"No fields should be set on a model instance if construct_instance receives fields=()"
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
class CustomMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super(CustomMetaclass, cls).__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
class CustomMetaclassForm(six.with_metaclass(CustomMetaclass, forms.ModelForm)):
pass
class CustomMetaclassTestCase(TestCase):
def test_modelform_factory_metaclass(self):
new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
self.assertEqual(new_cls.base_fields, {})
class TestTicket19733(TestCase):
def test_modelform_factory_without_fields(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
            # This should become an error once the deprecation cycle is complete.
form = modelform_factory(Person)
self.assertEqual(w[0].category, DeprecationWarning)
def test_modelform_factory_with_all_fields(self):
form = modelform_factory(Person, fields="__all__")
self.assertEqual(list(form.base_fields), ["name"])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
# $Id: gdal_polygonize.py 27044 2014-03-16 23:41:27Z rouault $
#
# Project: GDAL Python Interface
# Purpose: Application for converting raster data to a vector polygon layer.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
#******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
try:
from osgeo import gdal, ogr, osr
except ImportError:
import gdal, ogr, osr
import sys
import os.path
def Usage():
print("""
gdal_polygonize [-8] [-nomask] [-mask filename] raster_file [-b band]
[-q] [-f ogr_format] out_file [layer] [fieldname]
""")
sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================
format = 'GML'
options = []
quiet_flag = 0
src_filename = None
src_band_n = 1
dst_filename = None
dst_layername = None
dst_fieldname = None
dst_field = -1
mask = 'default'
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-f':
i = i + 1
format = argv[i]
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
elif arg == '-8':
options.append('8CONNECTED=8')
elif arg == '-nomask':
mask = 'none'
elif arg == '-mask':
i = i + 1
mask = argv[i]
elif arg == '-b':
i = i + 1
src_band_n = int(argv[i])
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
elif dst_layername is None:
dst_layername = argv[i]
elif dst_fieldname is None:
dst_fieldname = argv[i]
else:
Usage()
i = i + 1
if src_filename is None or dst_filename is None:
Usage()
if dst_layername is None:
dst_layername = 'out'
# =============================================================================
# Verify we have next gen bindings with the polygonize method.
# =============================================================================
try:
gdal.Polygonize
except AttributeError:
print('')
print('gdal.Polygonize() not available. You are likely using "old gen"')
print('bindings or an older version of the next gen bindings.')
print('')
sys.exit(1)
# =============================================================================
# Open source file
# =============================================================================
src_ds = gdal.Open( src_filename )
if src_ds is None:
print('Unable to open %s' % src_filename)
sys.exit(1)
srcband = src_ds.GetRasterBand(src_band_n)
if mask == 'default':
    maskband = srcband.GetMaskBand()
elif mask == 'none':
maskband = None
else:
mask_ds = gdal.Open( mask )
maskband = mask_ds.GetRasterBand(1)
# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
dst_ds = ogr.Open( dst_filename, update=1 )
gdal.PopErrorHandler()
except Exception:
dst_ds = None
# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
drv = ogr.GetDriverByName(format)
if not quiet_flag:
print('Creating output %s of format %s.' % (dst_filename, format))
dst_ds = drv.CreateDataSource( dst_filename )
# =============================================================================
# Find or create destination layer.
# =============================================================================
try:
dst_layer = dst_ds.GetLayerByName(dst_layername)
except Exception:
dst_layer = None
if dst_layer is None:
srs = None
if src_ds.GetProjectionRef() != '':
srs = osr.SpatialReference()
srs.ImportFromWkt( src_ds.GetProjectionRef() )
dst_layer = dst_ds.CreateLayer(dst_layername, srs = srs )
if dst_fieldname is None:
dst_fieldname = 'DN'
fd = ogr.FieldDefn( dst_fieldname, ogr.OFTInteger )
dst_layer.CreateField( fd )
dst_field = 0
else:
if dst_fieldname is not None:
dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)
if dst_field < 0:
print("Warning: cannot find field '%s' in layer '%s'" % (dst_fieldname, dst_layername))
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
prog_func = None
else:
prog_func = gdal.TermProgress
result = gdal.Polygonize( srcband, maskband, dst_layer, dst_field, options,
callback = prog_func )
srcband = None
src_ds = None
dst_ds = None
mask_ds = None
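# A usage sketch (file, layer and field names are hypothetical): polygonize
# band 1 of input.tif into a new shapefile, writing pixel values to a field
# named 'DN':
#
#   gdal_polygonize.py -8 -f "ESRI Shapefile" input.tif output.shp out_layer DN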
import Queue
import json
import os
import signal
from subprocess import CalledProcessError
import sys
import threading
from mysos.common.cluster import ClusterListener, get_cluster_path
from mysos.common.zookeeper import parse
from .installer import PackageInstaller
from .state import StateManager
from .task_runner import TaskError, TaskRunner, TaskRunnerProvider
from .task_control import TaskControl
from kazoo.client import KazooClient
from twitter.common import log
from twitter.common.concurrent import defer
from twitter.common.zookeeper.serverset.endpoint import Endpoint, ServiceInstance
class MysosTaskRunnerProvider(TaskRunnerProvider):
def __init__(self, task_control_provider, installer_provider, backup_store_provider):
self._task_control_provider = task_control_provider
self._installer_provider = installer_provider
self._backup_store_provider = backup_store_provider
def from_task(self, task, sandbox):
data = json.loads(task.data)
cluster_name, host, port, zk_url = data['cluster'], data['host'], data['port'], data['zk_url']
_, servers, path = parse(zk_url)
kazoo = KazooClient(servers)
kazoo.start()
self_instance = ServiceInstance(Endpoint(host, port))
try:
task_control = self._task_control_provider.from_task(task, sandbox)
installer = self._installer_provider.from_task(task, sandbox)
backup_store = self._backup_store_provider.from_task(task, sandbox)
except (TaskControl.Error, PackageInstaller.Error) as e:
raise TaskError(e.message)
state_manager = StateManager(sandbox, backup_store)
return MysosTaskRunner(
self_instance,
kazoo,
get_cluster_path(path, cluster_name),
installer,
task_control,
state_manager)
class MysosTaskRunner(TaskRunner):
"""
A runner that manages the lifecycle of a MySQL task (through the provided 'task_control').
  The task is executed as a long-running process; its return code can be obtained using 'join()'.
Thread-safety:
This class is accessed from the MysosExecutor thread (not the ExecutorDriver thread because
MysosExecutor invokes operations asynchronously) and the ClusterListener thread and is
thread-safe.
  TODO(jyx): Push the knowledge of the underlying subprocess down to the task control and stop
  the subprocess using the task control.
"""
def __init__(self, self_instance, kazoo, cluster_root, installer, task_control, state_manager):
"""
:param self_instance: The local ServiceInstance associated with this task runner.
    :param kazoo: Kazoo client; it should be started before being passed in.
:param cluster_root: The ZooKeeper root path for *this cluster*.
:param installer: The PackageInstaller for MySQL.
:param task_control: The TaskControl that interacts with the task process.
:param state_manager: The StateManager for managing the executor state.
"""
self._installer = installer
self._env = None # The environment variables for the 'task_control' commands. Set by the
# installer.
self._task_control = task_control
self._state_manager = state_manager
self._lock = threading.Lock()
self._popen = None # The singleton task process started by '_task_control'.
self._started = False # Indicates whether start() has already been called.
self._stopping = False # Indicates whether stop() has already been called.
self._exited = threading.Event() # Set when the task process has exited.
self._result = Queue.Queue() # The returncode returned by the task process or an exception.
# Public events and queue.
self.promoted = threading.Event()
self.demoted = threading.Event()
self.master = Queue.Queue() # Set when a master change is detected.
self._listener = ClusterListener(
kazoo,
cluster_root,
self_instance,
promotion_callback=self._on_promote,
demotion_callback=self._on_demote,
master_callback=self._on_master_change) # Listener started by start().
# --- Public interface. ---
def start(self):
"""
Start the runner in a separate thread and wait for the task process to be forked.
"""
with self._lock:
if self._started:
raise TaskError("Runner already started")
self._started = True
# Can potentially hold the lock for a long time but it's OK since the runner is not accessed
# by multiple threads until after it's started; can be a noop as well, depending on the
# installer implementation.
try:
# 1. Install the application.
self._env = self._installer.install()
log.info("Package installation completed. Resulting environment variables: %s" % self._env)
# 2. Restore/initialize the application state.
self._state_manager.bootstrap(self._task_control, self._env)
log.info("Executor state fully bootstrapped")
# 3. Start the task subprocess.
# Store the process so we can kill it if necessary.
self._popen = self._task_control.start(env=self._env)
log.info("Task started in subprocess %s" % self._popen.pid)
defer(self._wait)
# 4. Start monitoring.
# Only start listening to ZK events after the task subprocess has been successfully started.
self._listener.start()
except (PackageInstaller.Error, StateManager.Error, CalledProcessError) as e:
raise TaskError("Failed to start MySQL task: %s" % e)
def _wait(self):
# Block until the subprocess exits and delivers the return code.
self._result.put(self._popen.wait())
# Notify stop() if it is waiting.
self._exited.set()
def stop(self, timeout=10):
"""
Stop the runner and wait for its thread (and the sub-processes) to exit.
    :param timeout: The time the process is given to terminate after SIGTERM before a hard
      SIGKILL is issued.
:return: True if an active runner is stopped, False if the runner is not started or already
stopping/stopped.
"""
if not self._started:
log.warn("Cannot stop the runner because it's not started")
return False
if self._stopping:
log.warn("The runner is already stopping/stopped")
return False
with self._lock:
log.info("Stopping runner")
self._stopping = True
if not self._popen:
log.info("The runner task did not start successfully so no need to kill it")
return False
try:
log.info("Terminating process group: %s" % self._popen.pid)
os.killpg(self._popen.pid, signal.SIGTERM)
except OSError as e:
log.info("The sub-processes are already terminated: %s" % e)
return False
log.info("Waiting for process to terminate due to SIGTERM")
# Escalate to SIGKILL if SIGTERM is not sufficient.
if not self._exited.wait(timeout=timeout):
with self._lock:
try:
log.warn("Killing process group %s which failed to terminate cleanly within %s secs" %
(self._popen.pid, timeout))
os.killpg(self._popen.pid, signal.SIGKILL)
except OSError as e:
log.info("The sub-processes are already terminated: %s" % e)
return False
log.info("Waiting for process to terminate due to SIGKILL")
if not self._exited.wait(timeout=timeout):
raise TaskError("Failed to kill process group %s" % self._popen.pid)
return True
def get_log_position(self):
"""
Get the log position of the MySQL slave. Return None if it cannot be obtained.
"""
try:
log_position = self._task_control.get_log_position(env=self._env)
return log_position
except CalledProcessError as e:
raise TaskError("Unable to get the slave's log position: %s" % e)
def join(self):
"""
Wait for the runner to terminate.
:return: The return code of the subprocess. NOTE: A negative value -N indicates that the
child was terminated by signal N (on Unix).
:exception: The TaskError exception due to an error in task control operations.
"""
    # Using 'sys.maxint' as a timeout (rather than blocking with no timeout) keeps this
    # effectively-forever wait interruptible.
result = self._result.get(True, sys.maxint)
if isinstance(result, Exception):
raise result
else:
return result
# --- ClusterListener handlers. ---
def _on_promote(self):
self.promoted.set()
if not self._exited.is_set():
defer(self._promote)
def _promote(self):
try:
self._task_control.promote(env=self._env)
except CalledProcessError as e:
self._result.put(TaskError("Failed to promote the slave: %s" % e))
self.stop()
def _on_demote(self):
"""
Executor shuts itself down when demoted.
"""
self.demoted.set()
# Stop the runner asynchronously.
if not self._exited.is_set():
log.info("Shutting down runner because it is demoted.")
# Call stop() asynchronously because this callback is invoked from the Kazoo thread which we
# don't want to block.
defer(self.stop)
def _on_master_change(self, master):
self.master.put(master)
if not self._exited.is_set():
defer(lambda: self._reparent(master))
def _reparent(self, master):
try:
self._task_control.reparent(
master.service_endpoint.host,
master.service_endpoint.port,
env=self._env)
except CalledProcessError as e:
self._result.put(TaskError("Failed to reparent the slave: %s" % e))
self.stop()
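# A minimal usage sketch (names are illustrative; a real executor supplies
# concrete providers and a Mesos task):
#
#   provider = MysosTaskRunnerProvider(
#       task_control_provider, installer_provider, backup_store_provider)
#   runner = provider.from_task(task, sandbox)
#   runner.start()              # install, bootstrap state, fork the task process
#   returncode = runner.join()  # block until the task process exits
#   runner.stop()               # SIGTERM the process group, escalating to SIGKILL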
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
from ...extern.six.moves import zip
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/Documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(
r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
@staticmethod
def _generate_unit_names():
from ... import units as u
from ...units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'D', 'F', 'G', 'H', 'Hz', 'J', 'Jy', 'K', 'N',
'Ohm', 'Pa', 'R', 'Ry', 'S', 'T', 'V', 'W', 'Wb', 'a',
'adu', 'arcmin', 'arcsec', 'barn', 'beam', 'bin', 'cd',
'chan', 'count', 'ct', 'd', 'deg', 'eV', 'erg', 'g', 'h',
'lm', 'lx', 'lyr', 'm', 'mag', 'min', 'mol', 'pc', 'ph',
            'photon', 'pix', 'pixel', 'rad', 's', 'solLum',
'solMass', 'solRad', 'sr', 'u', 'voxel', 'yr'
]
binary_bases = [
'bit', 'byte', 'B'
]
simple_units = [
'Angstrom', 'angstrom', 'AU', 'au', 'Ba', 'dB', 'mas'
]
si_prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
binary_prefixes = [
'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei'
]
deprecated_units = set([
'a', 'angstrom', 'Angstrom', 'au', 'Ba', 'barn', 'ct',
'erg', 'G', 'ph', 'pix'
])
        def do_defines(bases, prefixes, skips=()):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
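        # For example, do_defines(['m'], ['k', '']) registers both 'km' and 'm'
        # above, skipping Python keywords and any names listed in `skips`.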
do_defines(bases, si_prefixes, ['pct', 'pcount', 'yd'])
do_defines(binary_bases, si_prefixes + binary_prefixes, ['dB', 'dbyte'])
do_defines(simple_units, [''])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ('unknown', 'UNKNOWN'):
return None
if s == '':
return core.dimensionless_unscaled
if s.count('/') > 1:
raise core.UnitsError(
"'{0}' contains multiple slashes, which is "
"disallowed by the VOUnit standard".format(s))
result = cls._do_parse(s, debug=debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported in "
"VOUnit.")
return result
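    # Illustrative behaviour of parse(), per the branches above:
    #   VOUnit.parse('')        -> dimensionless_unscaled
    #   VOUnit.parse('unknown') -> None
    #   VOUnit.parse('m/s/s')   -> raises UnitsError (multiple slashes)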
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if cls._explicit_custom_unit_regex.match(unit):
return cls._def_custom_unit(unit)
if not cls._custom_unit_regex.match(unit):
raise ValueError()
warnings.warn(
"Unit {0!r} not supported by the VOUnit "
"standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)),
core.UnitsWarning)
return cls._def_custom_unit(unit)
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'VOUnit',
cls._to_decomposed_alternative)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
        # The da- and d- prefixes are discouraged.  This has the
        # effect of adding a scale to the value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'da' "
"(deka) prefix".format(unit))
elif unit._represents.scale == 0.1:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'd' "
"(deci) prefix".format(unit))
name = unit.get_format_name('vounit')
if unit in six.itervalues(cls._custom_units):
return name
if name not in cls._units:
raise ValueError(
"Unit {0!r} is not part of the VOUnit standard".format(name))
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, 'VOUnit',
cls._to_decomposed_alternative)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={'vounit': name},
namespace=cls._custom_units)
else:
return core.def_unit(
name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix):]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(factor, [base_unit], [1],
_error_check=False),
format={'vounit': prefix + base_unit.names[-1]},
namespace=cls._custom_units)
return def_base(unit)
@classmethod
def to_string(cls, unit):
from .. import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.physical_type == 'dimensionless' and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
"Multiply your data by {0:e}."
.format(unit.scale))
s = ''
if unit.scale != 1:
m, ex = utils.split_mantissa_exponent(unit.scale)
parts = []
if m:
parts.append(m)
if ex:
fex = '10'
if not ex.startswith('-'):
fex += '+'
fex += ex
parts.append(fex)
s += ' '.join(parts)
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from .. import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
cls.to_string(unit), scale)
return s
# -*- coding: utf-8 -*-
import os
import pytest
import pipenv.utils
from pipenv.exceptions import PipenvUsageError
# Pipfile format <-> requirements.txt format.
DEP_PIP_PAIRS = [
({"requests": "*"}, "requests"),
({"requests": {"extras": ["socks"], "version": "*"}}, "requests[socks]"),
({"django": ">1.10"}, "django>1.10"),
({"Django": ">1.10"}, "Django>1.10"),
({"requests": {"extras": ["socks"], "version": ">1.10"}}, "requests[socks]>1.10"),
({"requests": {"extras": ["socks"], "version": "==1.10"}}, "requests[socks]==1.10"),
(
{
"pinax": {
"git": "git://github.com/pinax/pinax.git",
"ref": "1.4",
"editable": True,
}
},
"-e git+git://github.com/pinax/pinax.git@1.4#egg=pinax",
),
(
{"pinax": {"git": "git://github.com/pinax/pinax.git", "ref": "1.4"}},
"git+git://github.com/pinax/pinax.git@1.4#egg=pinax",
),
( # Mercurial.
{
"MyProject": {
"hg": "http://hg.myproject.org/MyProject",
"ref": "da39a3ee5e6b",
}
},
"hg+http://hg.myproject.org/MyProject@da39a3ee5e6b#egg=MyProject",
),
( # SVN.
{
"MyProject": {
"svn": "svn://svn.myproject.org/svn/MyProject",
"editable": True,
}
},
"-e svn+svn://svn.myproject.org/svn/MyProject#egg=MyProject",
),
(
# Extras in url
{
"discord.py": {
"file": "https://github.com/Rapptz/discord.py/archive/async.zip",
"extras": ["voice"],
}
},
"https://github.com/Rapptz/discord.py/archive/async.zip#egg=discord.py[voice]",
),
(
{
"requests": {
"git": "https://github.com/requests/requests.git",
"ref": "master",
"extras": ["security"],
"editable": False,
}
},
"git+https://github.com/requests/requests.git@master#egg=requests[security]",
),
]
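# For example, per the pairs above, convert_deps_to_pip({"django": ">1.10"}, r=False)
# is expected to return ["django>1.10"]; test_convert_deps_to_pip below exercises
# every pair in DEP_PIP_PAIRS.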
def mock_unpack(link, source_dir, download_dir, only_download=False, session=None,
hashes=None, progress_bar="off"):
return
@pytest.mark.utils
@pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS)
@pytest.mark.needs_internet
def test_convert_deps_to_pip(monkeypatch, deps, expected):
with monkeypatch.context() as m:
import pip_shims
m.setattr(pip_shims.shims, "unpack_url", mock_unpack)
if expected.startswith("Django"):
expected = expected.lower()
assert pipenv.utils.convert_deps_to_pip(deps, r=False) == [expected]
@pytest.mark.utils
@pytest.mark.parametrize(
"deps, expected",
[
# This one should be collapsed and treated as {'requests': '*'}.
({"requests": {}}, "requests"),
# Hash value should be passed into the result.
(
{
"FooProject": {
"version": "==1.2",
"hash": "sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
}
},
"FooProject==1.2 --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
),
(
{
"FooProject": {
"version": "==1.2",
"extras": ["stuff"],
"hash": "sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
}
},
"FooProject[stuff]==1.2 --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
),
(
{
"requests": {
"git": "https://github.com/requests/requests.git",
"ref": "master",
"extras": ["security"],
}
},
"git+https://github.com/requests/requests.git@master#egg=requests[security]",
),
],
)
def test_convert_deps_to_pip_one_way(deps, expected):
assert pipenv.utils.convert_deps_to_pip(deps, r=False) == [expected.lower()]
@pytest.mark.skipif(isinstance(u"", str), reason="don't need to test if unicode is str")
@pytest.mark.utils
def test_convert_deps_to_pip_unicode():
deps = {u"django": u"==1.10"}
deps = pipenv.utils.convert_deps_to_pip(deps, r=False)
assert deps[0] == "django==1.10"
class TestUtils:
"""Test utility functions in pipenv"""
@pytest.mark.utils
@pytest.mark.parametrize(
"version, specified_ver, expected",
[
("*", "*", True),
("2.1.6", "==2.1.4", False),
("20160913", ">=20140815", True),
(
"1.4",
{"svn": "svn://svn.myproj.org/svn/MyProj", "version": "==1.4"},
True,
),
("2.13.0", {"extras": ["socks"], "version": "==2.12.4"}, False),
],
)
def test_is_required_version(self, version, specified_ver, expected):
assert pipenv.utils.is_required_version(version, specified_ver) is expected
@pytest.mark.utils
@pytest.mark.parametrize(
"entry, expected",
[
({"git": "package.git", "ref": "v0.0.1"}, True),
({"hg": "https://package.com/package", "ref": "v1.2.3"}, True),
("*", False),
({"some_value": 5, "other_value": object()}, False),
("package", False),
("git+https://github.com/requests/requests.git#egg=requests", True),
("git+git@github.com:requests/requests.git#egg=requests", True),
("gitdb2", False),
],
)
@pytest.mark.vcs
def test_is_vcs(self, entry, expected):
from pipenv.vendor.requirementslib.utils import is_vcs
assert is_vcs(entry) is expected
@pytest.mark.utils
def test_python_version_from_bad_path(self):
assert pipenv.utils.python_version("/fake/path") is None
@pytest.mark.utils
def test_python_version_from_non_python(self):
assert pipenv.utils.python_version("/dev/null") is None
@pytest.mark.utils
@pytest.mark.parametrize(
"version_output, version",
[
("Python 3.6.2", "3.6.2"),
("Python 3.6.2 :: Continuum Analytics, Inc.", "3.6.2"),
("Python 3.6.20 :: Continuum Analytics, Inc.", "3.6.20"),
(
"Python 3.5.3 (3f6eaa010fce78cc7973bdc1dfdb95970f08fed2, Jan 13 2018, 18:14:01)\n[PyPy 5.10.1 with GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]",
"3.5.3",
),
],
)
# @patch(".vendor.pythonfinder.utils.get_python_version")
def test_python_version_output_variants(
self, monkeypatch, version_output, version
):
def mock_version(path):
return version_output.split()[1]
monkeypatch.setattr("pipenv.vendor.pythonfinder.utils.get_python_version", mock_version)
assert pipenv.utils.python_version("some/path") == version
@pytest.mark.utils
@pytest.mark.windows
@pytest.mark.skipif(os.name != "nt", reason="Windows test only")
def test_windows_shellquote(self):
test_path = r"C:\Program Files\Python36\python.exe"
expected_path = '"C:\\\\Program Files\\\\Python36\\\\python.exe"'
assert pipenv.utils.escape_grouped_arguments(test_path) == expected_path
@pytest.mark.utils
def test_is_valid_url(self):
url = "https://github.com/psf/requests.git"
not_url = "something_else"
assert pipenv.utils.is_valid_url(url)
assert pipenv.utils.is_valid_url(not_url) is False
@pytest.mark.utils
@pytest.mark.needs_internet
def test_download_file(self):
url = "https://github.com/pypa/pipenv/blob/master/README.md"
output = "test_download.md"
pipenv.utils.download_file(url, output)
assert os.path.exists(output)
os.remove(output)
@pytest.mark.utils
@pytest.mark.parametrize('line, expected', [
("python", True),
("python3.7", True),
("python2.7", True),
("python2", True),
("python3", True),
("pypy3", True),
("anaconda3-5.3.0", True),
("which", False),
("vim", False),
("miniconda", True),
("micropython", True),
("ironpython", True),
("jython3.5", True),
("2", True),
("2.7", True),
("3.7", True),
("3", True)
])
def test_is_python_command(self, line, expected):
assert pipenv.utils.is_python_command(line) == expected
@pytest.mark.utils
    def test_new_line_end_of_toml_file(self):
        # A TOML file that needs cleanup.
toml = """
[dev-packages]
"flake8" = ">=3.3.0,<4"
pytest = "*"
mock = "*"
sphinx = "<=1.5.5"
"-e ." = "*"
twine = "*"
"sphinx-click" = "*"
"pytest-xdist" = "*"
"""
new_toml = pipenv.utils.cleanup_toml(toml)
# testing if the end of the generated file contains a newline
assert new_toml[-1] == "\n"
@pytest.mark.utils
@pytest.mark.parametrize(
"input_path, expected",
[
(
"c:\\Program Files\\Python36\\python.exe",
"C:\\Program Files\\Python36\\python.exe",
),
(
"C:\\Program Files\\Python36\\python.exe",
"C:\\Program Files\\Python36\\python.exe",
),
("\\\\host\\share\\file.zip", "\\\\host\\share\\file.zip"),
("artifacts\\file.zip", "artifacts\\file.zip"),
(".\\artifacts\\file.zip", ".\\artifacts\\file.zip"),
("..\\otherproject\\file.zip", "..\\otherproject\\file.zip"),
],
)
@pytest.mark.skipif(os.name != "nt", reason="Windows file paths tested")
def test_win_normalize_drive(self, input_path, expected):
assert pipenv.utils.normalize_drive(input_path) == expected
@pytest.mark.utils
@pytest.mark.parametrize(
"input_path, expected",
[
("/usr/local/bin/python", "/usr/local/bin/python"),
("artifacts/file.zip", "artifacts/file.zip"),
("./artifacts/file.zip", "./artifacts/file.zip"),
("../otherproject/file.zip", "../otherproject/file.zip"),
],
)
@pytest.mark.skipif(os.name == "nt", reason="*nix file paths tested")
def test_nix_normalize_drive(self, input_path, expected):
assert pipenv.utils.normalize_drive(input_path) == expected
@pytest.mark.utils
@pytest.mark.parametrize(
"sources, expected_args",
[
(
[{"url": "https://test.example.com/simple", "verify_ssl": True}],
["-i", "https://test.example.com/simple"],
),
(
[{"url": "https://test.example.com/simple", "verify_ssl": False}],
[
"-i",
"https://test.example.com/simple",
"--trusted-host",
"test.example.com",
],
),
(
[{"url": "https://test.example.com:12345/simple", "verify_ssl": False}],
[
"-i",
"https://test.example.com:12345/simple",
"--trusted-host",
"test.example.com:12345",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com/simple"},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com/simple",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com/simple", "verify_ssl": False},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com/simple",
"--trusted-host",
"custom.example.com",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com:12345/simple", "verify_ssl": False},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com:12345/simple",
"--trusted-host",
"custom.example.com:12345",
],
),
(
[
{"url": "https://pypi.org/simple"},
{
"url": "https://user:password@custom.example.com/simple",
"verify_ssl": False,
},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://user:password@custom.example.com/simple",
"--trusted-host",
"custom.example.com",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://user:password@custom.example.com/simple"},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://user:password@custom.example.com/simple",
],
),
(
[
{
"url": "https://user:password@custom.example.com/simple",
"verify_ssl": False,
},
],
[
"-i",
"https://user:password@custom.example.com/simple",
"--trusted-host",
"custom.example.com",
],
),
],
)
def test_prepare_pip_source_args(self, sources, expected_args):
assert (
pipenv.utils.prepare_pip_source_args(sources, pip_args=None)
== expected_args
)
@pytest.mark.utils
def test_invalid_prepare_pip_source_args(self):
sources = [{}]
with pytest.raises(PipenvUsageError):
pipenv.utils.prepare_pip_source_args(sources, pip_args=None)
@pytest.mark.utils
def test_parse_python_version(self):
ver = pipenv.utils.parse_python_version("Python 3.6.5\n")
assert ver == {"major": "3", "minor": "6", "micro": "5"}
@pytest.mark.utils
def test_parse_python_version_suffix(self):
ver = pipenv.utils.parse_python_version("Python 3.6.5rc1\n")
assert ver == {"major": "3", "minor": "6", "micro": "5"}
@pytest.mark.utils
def test_parse_python_version_270(self):
ver = pipenv.utils.parse_python_version("Python 2.7\n")
assert ver == {"major": "2", "minor": "7", "micro": "0"}
@pytest.mark.utils
def test_parse_python_version_270_garbage(self):
ver = pipenv.utils.parse_python_version("Python 2.7+\n")
assert ver == {"major": "2", "minor": "7", "micro": "0"}
import logging
from six.moves.urllib.parse import urljoin
from xml.etree import ElementTree
import recurly
import recurly.js as js
from recurly.errors import *
from recurly.resource import Resource, Money, PageError
"""
Recurly's Python client library is an interface to its REST API.
Please see the Recurly API documentation for more information:
https://dev.recurly.com/docs/getting-started
"""
__version__ = '2.2.15'
BASE_URI = 'https://%s.recurly.com/v2/'
"""The API endpoint to send requests to."""
SUBDOMAIN = 'api'
"""The subdomain of the site authenticating API requests."""
API_KEY = None
"""The API key to use when authenticating API requests."""
API_VERSION = '2.1'
"""The API version to use when making API requests."""
CA_CERTS_FILE = None
"""A file contianing a set of concatenated certificate authority certs
for validating the server against."""
DEFAULT_CURRENCY = 'USD'
"""The currency to use creating `Money` instances when one is not specified."""
SOCKET_TIMEOUT_SECONDS = None
"""The number of seconds after which to timeout requests to the Recurly API.
If unspecified, the global default timeout is used."""
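# Typical module-level configuration before making API calls (the values here
# are placeholders):
#
#   recurly.SUBDOMAIN = 'your-subdomain'
#   recurly.API_KEY = 'your-private-api-key'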
def base_uri():
if SUBDOMAIN is None:
raise ValueError('recurly.SUBDOMAIN not set')
return BASE_URI % SUBDOMAIN
def api_version():
return API_VERSION
class Address(Resource):
nodename = 'address'
attributes = (
'address1',
'address2',
'city',
'state',
'zip',
'country',
'phone',
)
class Account(Resource):
"""A customer account."""
member_path = 'accounts/%s'
collection_path = 'accounts'
nodename = 'account'
attributes = (
'account_code',
'username',
'email',
'first_name',
'last_name',
'company_name',
'vat_number',
'tax_exempt',
'entity_use_code',
'accept_language',
'created_at',
)
_classes_for_nodename = {'address': Address}
sensitive_attributes = ('number', 'verification_value',)
def to_element(self):
elem = super(Account, self).to_element()
# Make sure the account code is always included in a serialization.
if 'account_code' not in self.__dict__: # not already included
try:
account_code = self.account_code
except AttributeError:
pass
else:
elem.append(self.element_for_value('account_code', account_code))
if 'billing_info' in self.__dict__:
elem.append(self.billing_info.to_element())
if 'address' in self.__dict__:
elem.append(self.address.to_element())
return elem
@classmethod
def all_active(cls, **kwargs):
"""Return a `Page` of active customer accounts.
This is a convenience method for `Account.all(state='active')`.
"""
return cls.all(state='active', **kwargs)
@classmethod
def all_closed(cls, **kwargs):
"""Return a `Page` of closed customer accounts.
This is a convenience method for `Account.all(state='closed')`.
"""
return cls.all(state='closed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due customer accounts.
        This is a convenience method for `Account.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
@classmethod
def all_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are subscribers.
        This is a convenience method for `Account.all(state='subscriber')`.
"""
return cls.all(state='subscriber', **kwargs)
@classmethod
def all_non_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are not subscribers.
        This is a convenience method for `Account.all(state='non_subscriber')`.
"""
return cls.all(state='non_subscriber', **kwargs)
def __getattr__(self, name):
if name == 'billing_info':
try:
billing_info_url = self._elem.find('billing_info').attrib['href']
except (AttributeError, KeyError):
raise AttributeError(name)
resp, elem = BillingInfo.element_for_url(billing_info_url)
return BillingInfo.from_element(elem)
try:
return super(Account, self).__getattr__(name)
except AttributeError:
if name == 'address':
self.address = Address()
return self.address
else:
raise AttributeError(name)
def charge(self, charge):
"""Charge (or credit) this account with the given `Adjustment`."""
url = urljoin(self._url, '%s/adjustments' % self.account_code)
return charge.post(url)
def invoice(self, **kwargs):
"""Create an invoice for any outstanding adjustments this account has."""
url = urljoin(self._url, '%s/invoices' % self.account_code)
if kwargs:
response = self.http_request(url, 'POST', Invoice(**kwargs), {'Content-Type':
'application/xml; charset=utf-8'})
else:
response = self.http_request(url, 'POST')
if response.status != 201:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
invoice = Invoice.from_element(elem)
invoice._url = response.getheader('Location')
return invoice
def build_invoice(self):
"""Preview an invoice for any outstanding adjustments this account has."""
url = urljoin(self._url, '%s/invoices/preview' % self.account_code)
response = self.http_request(url, 'POST')
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
invoice = Invoice.from_element(elem)
return invoice
def notes(self):
"""Fetch Notes for this account."""
url = urljoin(self._url, '%s/notes' % self.account_code)
return Note.paginated(url)
def redemption(self):
try:
return self.redemptions()[0]
except AttributeError:
raise AttributeError("redemption")
def reopen(self):
"""Reopen a closed account."""
url = urljoin(self._url, '%s/reopen' % self.account_code)
response = self.http_request(url, 'PUT')
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def subscribe(self, subscription):
"""Create the given `Subscription` for this existing account."""
url = urljoin(self._url, '%s/subscriptions' % self.account_code)
return subscription.post(url)
def update_billing_info(self, billing_info):
"""Change this account's billing information to the given `BillingInfo`."""
url = urljoin(self._url, '%s/billing_info' % self.account_code)
response = billing_info.http_request(url, 'PUT', billing_info,
{'Content-Type': 'application/xml; charset=utf-8'})
if response.status == 200:
pass
elif response.status == 201:
billing_info._url = response.getheader('Location')
else:
billing_info.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
billing_info.update_from_element(ElementTree.fromstring(response_xml))
class BillingInfo(Resource):
"""A set of billing information for an account."""
nodename = 'billing_info'
attributes = (
'type',
'name_on_account',
'first_name',
'last_name',
'number',
'verification_value',
'year',
'month',
'start_month',
'start_year',
'issue_number',
'company',
'address1',
'address2',
'city',
'state',
'zip',
'country',
'phone',
'vat_number',
'ip_address',
'ip_address_country',
'card_type',
'first_six',
'last_four',
'paypal_billing_agreement_id',
'amazon_billing_agreement_id',
'token_id',
'account_type',
'routing_number',
'account_number',
)
sensitive_attributes = ('number', 'verification_value', 'account_number')
xml_attribute_attributes = ('type',)
class Coupon(Resource):
"""A coupon for a customer to apply to their account."""
member_path = 'coupons/%s'
collection_path = 'coupons'
nodename = 'coupon'
attributes = (
'coupon_code',
'name',
'discount_type',
'discount_percent',
'discount_in_cents',
'redeem_by_date',
'invoice_description',
'single_use',
'applies_for_months',
'duration',
'temporal_unit',
'temporal_amount',
'max_redemptions',
'applies_to_all_plans',
'applies_to_non_plan_charges',
'redemption_resource',
'created_at',
'plan_codes',
'hosted_description',
'max_redemptions_per_account',
)
@classmethod
def value_for_element(cls, elem):
if not elem or elem.tag != 'plan_codes' or elem.attrib.get('type') != 'array':
return super(Coupon, cls).value_for_element(elem)
return [code_elem.text for code_elem in elem]
@classmethod
def element_for_value(cls, attrname, value):
if attrname != 'plan_codes':
return super(Coupon, cls).element_for_value(attrname, value)
elem = ElementTree.Element(attrname)
elem.attrib['type'] = 'array'
for code in value:
code_el = ElementTree.Element('plan_code')
code_el.text = code
elem.append(code_el)
return elem
@classmethod
def all_redeemable(cls, **kwargs):
"""Return a `Page` of redeemable coupons.
This is a convenience method for `Coupon.all(state='redeemable')`.
"""
return cls.all(state='redeemable', **kwargs)
@classmethod
def all_expired(cls, **kwargs):
"""Return a `Page` of expired coupons.
This is a convenience method for `Coupon.all(state='expired')`.
"""
return cls.all(state='expired', **kwargs)
@classmethod
def all_maxed_out(cls, **kwargs):
"""Return a `Page` of coupons that have been used the maximum
number of times.
This is a convenience method for `Coupon.all(state='maxed_out')`.
"""
return cls.all(state='maxed_out', **kwargs)
def has_unlimited_redemptions_per_account(self):
        return self.max_redemptions_per_account is None
class Redemption(Resource):
"""A particular application of a coupon to a customer account."""
nodename = 'redemption'
attributes = (
'account_code',
'single_use',
'total_discounted_in_cents',
'subscription_uuid',
'currency',
'created_at',
)
def delete_url(self):
return self._url + "s/" + self.uuid
class TaxDetail(Resource):
"""A charge's tax breakdown"""
nodename = 'taxdetail'
inherits_currency = True
attributes = (
'name',
'type',
'tax_rate',
'tax_in_cents',
)
class Adjustment(Resource):
"""A charge or credit applied (or to be applied) to an account's invoice."""
nodename = 'adjustment'
member_path = 'adjustments/%s'
attributes = (
'uuid',
'description',
'accounting_code',
'quantity',
'unit_amount_in_cents',
'discount_in_cents',
'tax_in_cents',
'tax_type',
'tax_region',
'tax_rate',
'total_in_cents',
'currency',
'tax_exempt',
'tax_code',
'tax_details',
'start_date',
'end_date',
'created_at',
'type',
)
xml_attribute_attributes = ('type',)
_classes_for_nodename = {'tax_detail': TaxDetail,}
# This can be removed when the `original_adjustment_uuid` is moved to a link
def __getattr__(self, name):
if name == 'original_adjustment':
try:
uuid = super(Adjustment, self).__getattr__('original_adjustment_uuid')
except (AttributeError):
return super(Adjustment, self).__getattr__(name)
return lambda: Adjustment.get(uuid)
else:
return super(Adjustment, self).__getattr__(name)
class Invoice(Resource):
"""A payable charge to an account for the customer's charges and
subscriptions."""
member_path = 'invoices/%s'
collection_path = 'invoices'
nodename = 'invoice'
attributes = (
'uuid',
'state',
'invoice_number',
'invoice_number_prefix',
'po_number',
'vat_number',
'subtotal_in_cents',
'tax_in_cents',
'tax_type',
'tax_rate',
'total_in_cents',
'currency',
'created_at',
'line_items',
'transactions',
'terms_and_conditions',
'customer_notes',
'address',
'closed_at',
)
blacklist_attributes = (
'currency',
)
def invoice_number_with_prefix(self):
return '%s%s' % (self.invoice_number_prefix, self.invoice_number)
@classmethod
def all_open(cls, **kwargs):
"""Return a `Page` of open invoices.
This is a convenience method for `Invoice.all(state='open')`.
"""
return cls.all(state='open', **kwargs)
@classmethod
def all_collected(cls, **kwargs):
"""Return a `Page` of collected invoices.
This is a convenience method for `Invoice.all(state='collected')`.
"""
return cls.all(state='collected', **kwargs)
@classmethod
def all_failed(cls, **kwargs):
"""Return a `Page` of failed invoices.
This is a convenience method for `Invoice.all(state='failed')`.
"""
return cls.all(state='failed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due invoices.
This is a convenience method for `Invoice.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
@classmethod
def pdf(cls, uuid):
"""Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
with open('invoice.pdf', 'w') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
"""
url = urljoin(base_uri(), cls.member_path % (uuid,))
pdf_response = cls.http_request(url, headers={'Accept': 'application/pdf'})
return pdf_response.read()
    def refund_amount(self, amount_in_cents, refund_apply_order='credit'):
amount_element = self.refund_open_amount_xml(amount_in_cents, refund_apply_order)
return self._create_refund_invoice(amount_element)
    def refund(self, adjustments, refund_apply_order='credit'):
adjustments_element = self.refund_line_items_xml(adjustments, refund_apply_order)
return self._create_refund_invoice(adjustments_element)
def refund_open_amount_xml(self, amount_in_cents, refund_apply_order):
elem = ElementTree.Element(self.nodename)
elem.append(Resource.element_for_value('refund_apply_order', refund_apply_order))
elem.append(Resource.element_for_value('amount_in_cents',
amount_in_cents))
return elem
def refund_line_items_xml(self, line_items, refund_apply_order):
elem = ElementTree.Element(self.nodename)
elem.append(Resource.element_for_value('refund_apply_order', refund_apply_order))
line_items_elem = ElementTree.Element('line_items')
for item in line_items:
adj_elem = ElementTree.Element('adjustment')
adj_elem.append(Resource.element_for_value('uuid',
item['adjustment'].uuid))
adj_elem.append(Resource.element_for_value('quantity',
item['quantity']))
adj_elem.append(Resource.element_for_value('prorate', item['prorate']))
line_items_elem.append(adj_elem)
elem.append(line_items_elem)
return elem
def _create_refund_invoice(self, element):
url = urljoin(self._url, '%s/refund' % (self.invoice_number, ))
body = ElementTree.tostring(element, encoding='UTF-8')
refund_invoice = Invoice()
refund_invoice.post(url, body)
return refund_invoice
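    # Usage sketch for the refund helpers above (the names `invoice` and
    # `adj` are illustrative, not part of the client API). `refund_amount`
    # takes an amount in cents; `refund` takes dicts shaped the way
    # refund_line_items_xml expects:
    #
    #   invoice.refund_amount(500)  # refund 500 cents of the open amount
    #   invoice.refund([
    #       {'adjustment': adj, 'quantity': 1, 'prorate': False},
    #   ])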
def redemption(self):
try:
return self.redemptions()[0]
except AttributeError:
raise AttributeError("redemption")
class Subscription(Resource):
"""A customer account's subscription to your service."""
member_path = 'subscriptions/%s'
collection_path = 'subscriptions'
nodename = 'subscription'
attributes = (
'uuid',
'state',
'plan_code',
'coupon_code',
'coupon_codes',
'quantity',
'activated_at',
'canceled_at',
'starts_at',
'expires_at',
'current_period_started_at',
'current_period_ends_at',
'trial_started_at',
'trial_ends_at',
'unit_amount_in_cents',
'tax_in_cents',
'tax_type',
'tax_rate',
'total_billing_cycles',
'remaining_billing_cycles',
'timeframe',
'currency',
'subscription_add_ons',
'account',
'pending_subscription',
'net_terms',
'collection_method',
'po_number',
'first_renewal_date',
'bulk',
'terms_and_conditions',
'customer_notes',
'vat_reverse_charge_notes',
'bank_account_authorized_at',
'redemptions',
)
sensitive_attributes = ('number', 'verification_value', 'bulk')
def preview(self):
if hasattr(self, '_url'):
url = self._url + '/preview'
return self.post(url)
else:
url = urljoin(recurly.base_uri(), self.collection_path) + '/preview'
return self.post(url)
def update_notes(self, **kwargs):
"""Updates the notes on the subscription without generating a change"""
for key, val in kwargs.iteritems():
setattr(self, key, val)
url = urljoin(self._url, '%s/notes' % self.uuid)
self.put(url)
def _update(self):
if not hasattr(self, 'timeframe'):
self.timeframe = 'now'
return super(Subscription, self)._update()
def __getpath__(self, name):
if name == 'plan_code':
return 'plan/plan_code'
else:
return name
class TransactionBillingInfo(recurly.Resource):
node_name = 'billing_info'
attributes = (
'first_name',
'last_name',
'address1',
'address2',
'city',
'state',
'country',
'zip',
'phone',
'vat_number',
'first_six',
'last_four',
'card_type',
'month',
'year',
'transaction_uuid',
)
class TransactionAccount(recurly.Resource):
node_name = 'account'
attributes = (
'first_name',
'last_name',
'company',
'email',
'account_code',
)
_classes_for_nodename = {'billing_info': TransactionBillingInfo}
class TransactionDetails(recurly.Resource):
node_name = 'details'
    attributes = ('account',)
_classes_for_nodename = {'account': TransactionAccount}
class TransactionError(recurly.Resource):
node_name = 'transaction_error'
attributes = (
'id',
'merchant_message',
        'error_category',
'customer_message',
'error_code',
'gateway_error_code',
)
class Transaction(Resource):
"""An immediate one-time charge made to a customer's account."""
member_path = 'transactions/%s'
collection_path = 'transactions'
nodename = 'transaction'
attributes = (
'uuid',
'action',
'account',
'currency',
'amount_in_cents',
'tax_in_cents',
'status',
'reference',
'test',
'voidable',
'description',
'refundable',
'cvv_result',
'avs_result',
'avs_result_street',
'avs_result_postal',
'created_at',
'details',
'transaction_error',
'type',
'ip_address',
'tax_exempt',
'tax_code',
'accounting_code',
)
xml_attribute_attributes = ('type',)
sensitive_attributes = ('number', 'verification_value',)
_classes_for_nodename = {
'details': TransactionDetails,
'transaction_error': TransactionError
}
def _handle_refund_accepted(self, response):
if response.status != 202:
self.raise_http_error(response)
self._refund_transaction_url = response.getheader('Location')
return self
def get_refund_transaction(self):
"""Retrieve the refund transaction for this transaction, immediately
after refunding.
After calling `refund()` to refund a transaction, call this method to
retrieve the new transaction representing the refund.
"""
try:
url = self._refund_transaction_url
except AttributeError:
raise ValueError("No refund transaction is available for this transaction")
resp, elem = self.element_for_url(url)
value = self.value_for_element(elem)
return value
def refund(self, **kwargs):
"""Refund this transaction.
Calling this method returns the refunded transaction (that is,
``self``) if the refund was successful, or raises a `ResponseError` if
an error occurred requesting the refund. After a successful call to
`refund()`, to retrieve the new transaction representing the refund,
use the `get_refund_transaction()` method.
"""
# Find the URL and method to refund the transaction.
try:
selfnode = self._elem
except AttributeError:
raise AttributeError('refund')
url, method = None, None
for anchor_elem in selfnode.findall('a'):
if anchor_elem.attrib.get('name') == 'refund':
url = anchor_elem.attrib['href']
method = anchor_elem.attrib['method'].upper()
if url is None or method is None:
raise AttributeError("refund") # should do something more specific probably
actionator = self._make_actionator(url, method, extra_handler=self._handle_refund_accepted)
return actionator(**kwargs)
Transaction._classes_for_nodename['transaction'] = Transaction
class Plan(Resource):
"""A service level for your service to which a customer account
can subscribe."""
member_path = 'plans/%s'
collection_path = 'plans'
nodename = 'plan'
attributes = (
'plan_code',
'name',
'description',
'success_url',
'cancel_url',
'display_donation_amounts',
'display_quantity',
'display_phone_number',
'bypass_hosted_confirmation',
'unit_name',
'payment_page_tos_link',
'plan_interval_length',
'plan_interval_unit',
'trial_interval_length',
'trial_interval_unit',
'accounting_code',
'setup_fee_accounting_code',
'created_at',
'tax_exempt',
'tax_code',
'unit_amount_in_cents',
'setup_fee_in_cents',
'total_billing_cycles',
)
def get_add_on(self, add_on_code):
"""Return the `AddOn` for this plan with the given add-on code."""
url = urljoin(self._url, '%s/add_ons/%s' % (self.plan_code, add_on_code))
resp, elem = AddOn.element_for_url(url)
return AddOn.from_element(elem)
def create_add_on(self, add_on):
"""Make the given `AddOn` available to subscribers on this plan."""
url = urljoin(self._url, '%s/add_ons' % self.plan_code)
return add_on.post(url)
class AddOn(Resource):
"""An additional benefit a customer subscribed to a particular plan
can also subscribe to."""
nodename = 'add_on'
attributes = (
'add_on_code',
'name',
'display_quantity_on_hosted_page',
'display_quantity',
'default_quantity',
'accounting_code',
'unit_amount_in_cents',
'tax_code',
'created_at',
)
class SubscriptionAddOn(Resource):
"""A plan add-on as added to a customer's subscription.
Use these instead of `AddOn` instances when specifying a
`Subscription` instance's `subscription_add_ons` attribute.
"""
nodename = 'subscription_add_on'
inherits_currency = True
attributes = (
'add_on_code',
'quantity',
'unit_amount_in_cents',
'address',
)
class Note(Resource):
"""A customer account's notes."""
nodename = 'note'
collection_path = 'notes'
attributes = (
'message',
'created_at',
)
@classmethod
def from_element(cls, elem):
new_note = Note()
for child_el in elem:
if not child_el.tag:
continue
setattr(new_note, child_el.tag, child_el.text)
return new_note
Resource._learn_nodenames(locals().values())
def objects_for_push_notification(notification):
"""Decode a push notification with the given body XML.
Returns a dictionary containing the constituent objects of the push
notification. The kind of push notification is given in the ``"type"``
member of the returned dictionary.
"""
notification_el = ElementTree.fromstring(notification)
objects = {'type': notification_el.tag}
for child_el in notification_el:
tag = child_el.tag
res = Resource.value_for_element(child_el)
objects[tag] = res
return objects
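# Minimal usage sketch for objects_for_push_notification; the XML body below
# is a made-up illustration of the payload shape, not a captured notification:
#
#   body = ('<new_account_notification><account>...</account>'
#           '</new_account_notification>')
#   objects = objects_for_push_notification(body)
#   objects['type']     # -> 'new_account_notification'
#   objects['account']  # -> parsed into the matching Resource subclass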
from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
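# For example: a bare `lambda d: d ** -2` fails at d == 0 (0.0 ** -2 raises
# ZeroDivisionError for Python floats), whereas here 1. / 0. yields inf under
# errstate(divide='ignore'); handling the resulting infinite weights is left
# to the caller (see test_neighbors_regressors_zero_distance below).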
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(data)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                elif False:  # disabled; outlier labeling is covered by the dedicated test below
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # We don't test weights=_weight_func here since the user is expected
        # to handle zero distances in the callable themselves.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_OR_DENSE:
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit classifiers and a regressor on the full dataset and check that
    # the training data is reproduced (almost) perfectly.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if (issubclass(cls, neighbors.KNeighborsClassifier) or
                issubclass(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()  # sort in place; np.sort returns a copy that was discarded
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement
import os
import urlparse
from tests.unit import unittest
from httpretty import HTTPretty
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
from boto.compat import json
class TestListParamsSerialization(unittest.TestCase):
maxDiff = None
def setUp(self):
self.connection = AWSQueryConnection('access_key', 'secret_key')
def test_complex_list_serialization(self):
# This example is taken from the doc string of
# build_complex_list_params.
params = {}
self.connection.build_complex_list_params(
params, [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')],
'ParamName.member', ('One', 'Two', 'Three'))
self.assertDictEqual({
'ParamName.member.1.One': 'foo',
'ParamName.member.1.Two': 'bar',
'ParamName.member.1.Three': 'baz',
'ParamName.member.2.One': 'foo2',
'ParamName.member.2.Two': 'bar2',
'ParamName.member.2.Three': 'baz2',
}, params)
def test_simple_list_serialization(self):
params = {}
self.connection.build_list_params(
params, ['foo', 'bar', 'baz'], 'ParamName.member')
self.assertDictEqual({
'ParamName.member.1': 'foo',
'ParamName.member.2': 'bar',
'ParamName.member.3': 'baz',
}, params)
class MockAWSService(AWSQueryConnection):
"""
Fake AWS Service
    This is used to test that the AWSQueryConnection object behaves properly.
"""
APIVersion = '2012-01-01'
def _required_auth_capability(self):
return ['sign-v2']
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, host=None, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None,
validate_certs=True):
self.region = region
if host is None:
host = self.region.endpoint
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
host, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs)
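# Typical wiring, mirroring TestAWSQueryConnection.setUp below (the values
# are test fixtures, not real credentials or endpoints):
#
#   region = RegionInfo(name='cc-zone-1',
#                       endpoint='mockservice.cc-zone-1.amazonaws.com',
#                       connection_cls=MockAWSService)
#   conn = region.connect(aws_access_key_id='access_key',
#                         aws_secret_access_key='secret')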
class TestAWSAuthConnection(unittest.TestCase):
def test_get_path(self):
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False
)
# Test some sample paths for mangling.
self.assertEqual(conn.get_path('/'), '/')
self.assertEqual(conn.get_path('image.jpg'), '/image.jpg')
self.assertEqual(conn.get_path('folder/image.jpg'), '/folder/image.jpg')
self.assertEqual(conn.get_path('folder//image.jpg'), '/folder//image.jpg')
# Ensure leading slashes aren't removed.
# See https://github.com/boto/boto/issues/1387
self.assertEqual(conn.get_path('/folder//image.jpg'), '/folder//image.jpg')
self.assertEqual(conn.get_path('/folder////image.jpg'), '/folder////image.jpg')
self.assertEqual(conn.get_path('///folder////image.jpg'), '///folder////image.jpg')
def test_connection_behind_proxy(self):
os.environ['http_proxy'] = "http://john.doe:p4ssw0rd@127.0.0.1:8180"
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False
)
self.assertEqual(conn.proxy, '127.0.0.1')
self.assertEqual(conn.proxy_user, 'john.doe')
self.assertEqual(conn.proxy_pass, 'p4ssw0rd')
self.assertEqual(conn.proxy_port, '8180')
del os.environ['http_proxy']
def test_connection_behind_proxy_without_explicit_port(self):
os.environ['http_proxy'] = "http://127.0.0.1"
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False,
port=8180
)
self.assertEqual(conn.proxy, '127.0.0.1')
self.assertEqual(conn.proxy_port, 8180)
del os.environ['http_proxy']
class TestAWSQueryConnection(unittest.TestCase):
def setUp(self):
self.region = RegionInfo(name='cc-zone-1',
endpoint='mockservice.cc-zone-1.amazonaws.com',
connection_cls=MockAWSService)
HTTPretty.enable()
def tearDown(self):
HTTPretty.disable()
class TestAWSQueryConnectionSimple(TestAWSQueryConnection):
def test_query_connection_basis(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
self.assertEqual(conn.host, 'mockservice.cc-zone-1.amazonaws.com')
def test_query_connection_noproxy(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
os.environ['no_proxy'] = self.region.endpoint
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
proxy="NON_EXISTENT_HOSTNAME",
proxy_port="3128")
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
del os.environ['no_proxy']
args = urlparse.parse_qs(HTTPretty.last_request.body)
self.assertEqual(args['AWSAccessKeyId'], ['access_key'])
def test_query_connection_noproxy_nosecure(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'insecure'}),
content_type='application/json')
os.environ['no_proxy'] = self.region.endpoint
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
proxy="NON_EXISTENT_HOSTNAME",
proxy_port="3128",
is_secure = False)
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
del os.environ['no_proxy']
args = urlparse.parse_qs(HTTPretty.last_request.body)
self.assertEqual(args['AWSAccessKeyId'], ['access_key'])
def test_single_command(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
args = urlparse.parse_qs(HTTPretty.last_request.body)
self.assertEqual(args['AWSAccessKeyId'], ['access_key'])
self.assertEqual(args['SignatureMethod'], ['HmacSHA256'])
self.assertEqual(args['Version'], [conn.APIVersion])
self.assertEqual(args['par1'], ['foo'])
self.assertEqual(args['par2'], ['baz'])
self.assertEqual(resp.read(), '{"test": "secure"}')
def test_multi_commands(self):
"""Check connection re-use"""
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp1 = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
body1 = urlparse.parse_qs(HTTPretty.last_request.body)
resp2 = conn.make_request('myCmd2',
{'par3': 'bar', 'par4': 'narf'},
"/",
"POST")
body2 = urlparse.parse_qs(HTTPretty.last_request.body)
self.assertEqual(body1['par1'], ['foo'])
self.assertEqual(body1['par2'], ['baz'])
with self.assertRaises(KeyError):
body1['par3']
self.assertEqual(body2['par3'], ['bar'])
self.assertEqual(body2['par4'], ['narf'])
with self.assertRaises(KeyError):
body2['par1']
self.assertEqual(resp1.read(), '{"test": "secure"}')
self.assertEqual(resp2.read(), '{"test": "secure"}')
def test_non_secure(self):
HTTPretty.register_uri(HTTPretty.POST,
'http://%s/' % self.region.endpoint,
json.dumps({'test': 'normal'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
is_secure=False)
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
self.assertEqual(resp.read(), '{"test": "normal"}')
def test_alternate_port(self):
HTTPretty.register_uri(HTTPretty.POST,
'http://%s:8080/' % self.region.endpoint,
json.dumps({'test': 'alternate'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
port=8080,
is_secure=False)
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
self.assertEqual(resp.read(), '{"test": "alternate"}')
def test_temp_failure(self):
responses = [HTTPretty.Response(body="{'test': 'fail'}", status=500),
HTTPretty.Response(body="{'test': 'success'}", status=200)]
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/temp_fail/' % self.region.endpoint,
responses=responses)
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
'/temp_fail/',
'POST')
self.assertEqual(resp.read(), "{'test': 'success'}")
def test_connection_close(self):
"""Check connection re-use after close header is received"""
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json',
connection='close')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
def mock_put_conn(*args, **kwargs):
raise Exception('put_http_connection should not be called!')
conn.put_http_connection = mock_put_conn
resp1 = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
        # If we've gotten this far, then no exception was raised by
        # attempting to put the connection back into the pool. Now confirm
        # that the close header was actually set, or we have another problem.
self.assertEqual(resp1.getheader('connection'), 'close')
def test_port_pooling(self):
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
port=8080)
# Pick a connection, then put it back
con1 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con1)
# Pick another connection, which hopefully is the same yet again
con2 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con2)
self.assertEqual(con1, con2)
# Change the port and make sure a new connection is made
conn.port = 8081
con3 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con3)
self.assertNotEqual(con1, con3)
class TestAWSQueryStatus(TestAWSQueryConnection):
def test_get_status(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'<status>ok</status>',
content_type='text/xml')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
self.assertEqual(resp, "ok")
def test_get_status_blank_error(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'',
content_type='text/xml')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
with self.assertRaises(BotoServerError):
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
def test_get_status_error(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'<status>error</status>',
content_type='text/xml',
status=400)
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
with self.assertRaises(BotoServerError):
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
if __name__ == '__main__':
unittest.main()
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Translate between test paths and Descriptors.
Test paths describe a timeseries by its path in a tree of timeseries.
Descriptors describe a timeseries semantically by its characteristics.
Descriptors allow users to navigate timeseries using meaningful words like
"measurement" and "test case" instead of meaningless words like "subtest".
Test paths can be arbitrarily long, but there are a fixed number of semantic
characteristics. Multiple test path components may be joined into a single
characteristic.
These are timeseries Descriptors, not test suite descriptors, not line
descriptors, not fetch descriptors.
This translation layer should be temporary until the descriptor concept can be
pushed down into the Model layer.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import re
from google.appengine.ext import ndb
from dashboard.common import bot_configurations
from dashboard.common import stored_object
TEST_BUILD_TYPE = 'test'
REFERENCE_BUILD_TYPE = 'ref'
STATISTICS = ['avg', 'count', 'max', 'min', 'std', 'sum']
NO_MITIGATIONS_CASE = 'no-mitigations'
STATISTICS_REGEX = '(.*)_(%s)$' % '|'.join(STATISTICS + [
r'pct_[\d_]+',
r'ipr_[\d_]+',
r'ci_[\d]{3}(?:_lower|_upper)?',
])
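# A quick sketch of what STATISTICS_REGEX extracts (the measurement names
# below are hypothetical): 'timeToFirstPaint_avg' splits into
# ('timeToFirstPaint', 'avg'), and 'load_pct_90' into ('load', 'pct_90').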
# This stored object contains a list of test path component strings that must be
# joined with the subsequent test path component in order to form a composite
# test suite. Some are internal-only, but there's no need to store a separate
# list for external users since this data is not served, just used to parse test
# keys.
PARTIAL_TEST_SUITES_KEY = 'partial_test_suites'
# This stored object contains a list of test suites composed of 2 or more test
# path components. All composite test suites start with a partial test suite,
# but not all test suites that start with a partial test suite are composite.
COMPOSITE_TEST_SUITES_KEY = 'composite_test_suites'
# This stored object contains a list of prefixes of test suites that are
# transformed to allow the UI to group them.
GROUPABLE_TEST_SUITE_PREFIXES_KEY = 'groupable_test_suite_prefixes'
# This stored object contains a list of test suites whose measurements are
# composed of multiple test path components.
POLY_MEASUREMENT_TEST_SUITES_KEY = 'poly_measurement_test_suites'
# This stored object contains a list of test suites whose measurements and test
# cases are each composed of two test path components.
TWO_TWO_TEST_SUITES_KEY = 'two_two_test_suites'
# This stored object contains a list of test suites whose test cases are
# composed of two test path components.
ONE_TWO_TEST_SUITES_KEY = 'one_two_test_suites'
# This stored object contains a list of test suites whose test cases are
# partially duplicated to two test path components like 'prefix/prefix_suffix'.
COMPLEX_CASES_TEST_SUITES_KEY = 'complex_cases_test_suites'
class Descriptor(object):
"""Describe a timeseries by its characteristics.
Supports partial test paths (e.g. test suite paths) by allowing some
characteristics to be None.
"""
def __init__(self, test_suite=None, measurement=None, bot=None,
test_case=None, statistic=None, build_type=None):
self.test_suite = test_suite
self.measurement = measurement
self.bot = bot
self.test_case = test_case
self.statistic = statistic
self.build_type = build_type
def Clone(self):
return Descriptor(self.test_suite, self.measurement, self.bot,
self.test_case, self.statistic, self.build_type)
def __repr__(self):
return 'Descriptor(%r, %r, %r, %r, %r, %r)' % (
self.test_suite, self.measurement, self.bot, self.test_case,
self.statistic, self.build_type)
def __eq__(self, other):
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
CONFIGURATION = {}
@classmethod
@ndb.tasklet
def _GetConfiguration(cls, key, default=None):
if key not in cls.CONFIGURATION:
cls.CONFIGURATION[key] = (yield stored_object.GetAsync(key)) or default
raise ndb.Return(cls.CONFIGURATION[key])
@classmethod
def ResetMemoizedConfigurationForTesting(cls):
cls.CONFIGURATION = {}
@classmethod
@ndb.tasklet
def _MeasurementCase(cls, test_suite, path):
if len(path) == 1:
raise ndb.Return((path.pop(0), None))
if test_suite.startswith('loading.'):
measurement = path.pop(0)
parts, path[:] = path[:], []
if len(parts) > 1 and parts[1].endswith('_' + parts[0]):
parts[1] = parts[1][:-(len(parts[0]) + 1)]
raise ndb.Return((measurement, ':'.join(parts)))
if test_suite.startswith('resource_sizes'):
parts, path[:] = path[:], []
raise ndb.Return((':'.join(parts), None))
if test_suite == 'sizes':
parts, path[:] = path[:], []
raise ndb.Return((':'.join(parts[:6]), ':'.join(parts[6:])))
complex_cases_test_suites = yield cls._GetConfiguration(
COMPLEX_CASES_TEST_SUITES_KEY, [])
if (test_suite.startswith('system_health') or
(test_suite in complex_cases_test_suites)):
measurement = path.pop(0)
prefix = path.pop(0)
if len(path) == 0:
raise ndb.Return((measurement, prefix.replace('_', ':')))
raise ndb.Return((measurement, path.pop(0).replace('_', ':').replace(
'long:running:tools', 'long_running_tools')))
one_two_test_suites = yield cls._GetConfiguration(
ONE_TWO_TEST_SUITES_KEY, [])
if test_suite in one_two_test_suites:
parts, path[:] = path[:], []
raise ndb.Return(parts[0], ':'.join(parts[1:]))
two_two_test_suites = yield cls._GetConfiguration(
TWO_TWO_TEST_SUITES_KEY, [])
if test_suite in two_two_test_suites:
parts, path[:] = path[:], []
raise ndb.Return(':'.join(parts[:2]), ':'.join(parts[2:]))
if test_suite in [
'memory.dual_browser_test', 'memory.top_10_mobile',
'v8:runtime_stats.top_25']:
measurement = path.pop(0)
case = path.pop(0)
if len(path) == 0:
raise ndb.Return((measurement, None))
raise ndb.Return((measurement, case + ':' + path.pop(0)))
if test_suite in (yield cls._GetConfiguration(
POLY_MEASUREMENT_TEST_SUITES_KEY, [])):
parts, path[:] = path[:], []
case = None
if parts[-1] == NO_MITIGATIONS_CASE:
case = parts.pop()
raise ndb.Return((':'.join(parts), case))
raise ndb.Return((path.pop(0), path.pop(0)))
@classmethod
def FromTestPathSync(cls, test_path):
return cls.FromTestPathAsync(test_path).get_result()
@classmethod
@ndb.tasklet
def FromTestPathAsync(cls, test_path):
"""Parse a test path into a Descriptor.
Args:
      test_path: A test path string of slash-separated components.
Returns:
Descriptor
"""
path = test_path.split('/')
if len(path) < 2:
raise ndb.Return(cls())
bot = path.pop(0) + ':' + path.pop(0)
if len(path) == 0:
raise ndb.Return(cls(bot=bot))
test_suite = path.pop(0)
if test_suite in (yield cls._GetConfiguration(PARTIAL_TEST_SUITES_KEY, [])):
if len(path) == 0:
raise ndb.Return(cls(bot=bot))
test_suite += ':' + path.pop(0)
if test_suite.startswith('resource_sizes '):
test_suite = 'resource_sizes:' + test_suite[16:-1]
else:
for prefix in (yield cls._GetConfiguration(
GROUPABLE_TEST_SUITE_PREFIXES_KEY, [])):
if test_suite.startswith(prefix):
test_suite = prefix[:-1] + ':' + test_suite[len(prefix):]
break
if len(path) == 0:
raise ndb.Return(cls(test_suite=test_suite, bot=bot))
build_type = TEST_BUILD_TYPE
if path[-1] == 'ref':
path.pop()
build_type = REFERENCE_BUILD_TYPE
elif path[-1].endswith('_ref'):
build_type = REFERENCE_BUILD_TYPE
path[-1] = path[-1][:-4]
if len(path) == 0:
raise ndb.Return(cls(
test_suite=test_suite, bot=bot, build_type=build_type))
measurement, test_case = yield cls._MeasurementCase(test_suite, path)
statistic = None
if measurement not in ['jank_count', 'exp_jank_count']:
stat_match = re.match(STATISTICS_REGEX, measurement)
if stat_match:
measurement, statistic = stat_match.groups()
if test_suite != 'graphics:GLBench' and path:
raise ValueError('Unable to parse %r' % test_path)
raise ndb.Return(cls(
test_suite=test_suite, bot=bot, measurement=measurement,
statistic=statistic, test_case=test_case, build_type=build_type))
def ToTestPathsSync(self):
return self.ToTestPathsAsync().get_result()
@ndb.tasklet
def ToTestPathsAsync(self):
# There may be multiple possible test paths for a given Descriptor.
if not self.bot:
raise ndb.Return(set())
test_paths = yield self._BotTestPaths()
if not self.test_suite:
raise ndb.Return(test_paths)
test_paths = yield self._AppendTestSuite(test_paths)
if not self.measurement:
raise ndb.Return(test_paths)
test_paths = yield self._AppendMeasurement(test_paths)
if self.statistic:
test_paths = {p + '_' + self.statistic for p in test_paths}
if self.test_case:
test_paths = yield self._AppendTestCase(test_paths)
if self.build_type == REFERENCE_BUILD_TYPE:
test_paths = self._AppendRef(test_paths)
raise ndb.Return(test_paths)
@ndb.tasklet
def _BotTestPaths(self):
master, slave = self.bot.split(':')
aliases = yield bot_configurations.GetAliasesAsync(slave)
raise ndb.Return({master + '/' + alias for alias in aliases})
@ndb.tasklet
def _AppendTestSuite(self, test_paths):
if self.test_suite.startswith('resource_sizes:'):
raise ndb.Return({p + '/resource_sizes (%s)' % self.test_suite[15:]
for p in test_paths})
composite_test_suites = yield self._GetConfiguration(
COMPOSITE_TEST_SUITES_KEY, [])
if self.test_suite in composite_test_suites:
raise ndb.Return({p + '/' + self.test_suite.replace(':', '/')
for p in test_paths})
first_part = self.test_suite.split(':')[0]
groupable_prefixes = yield self._GetConfiguration(
GROUPABLE_TEST_SUITE_PREFIXES_KEY, [])
for prefix in groupable_prefixes:
if prefix[:-1] == first_part:
raise ndb.Return({
p + '/' + prefix + self.test_suite[len(first_part) + 1:]
for p in test_paths})
raise ndb.Return({p + '/' + self.test_suite for p in test_paths})
@ndb.tasklet
def _AppendMeasurement(self, test_paths):
poly_measurement_test_suites = yield self._GetConfiguration(
POLY_MEASUREMENT_TEST_SUITES_KEY, [])
poly_measurement_test_suites += yield self._GetConfiguration(
TWO_TWO_TEST_SUITES_KEY, [])
if self.test_suite in poly_measurement_test_suites:
raise ndb.Return({p + '/' + self.measurement.replace(':', '/')
for p in test_paths})
raise ndb.Return({p + '/' + self.measurement for p in test_paths})
@ndb.tasklet
def _AppendTestCase(self, test_paths):
complex_cases_test_suites = yield self._GetConfiguration(
COMPLEX_CASES_TEST_SUITES_KEY, [])
if (self.test_suite.startswith('system_health') or
(self.test_suite in complex_cases_test_suites)):
test_case = self.test_case.split(':')
if test_case[0] == 'long_running_tools':
test_paths = {p + '/' + test_case[0] for p in test_paths}
else:
test_paths = {p + '/' + '_'.join(test_case[:2]) for p in test_paths}
raise ndb.Return({p + '/' + '_'.join(test_case) for p in test_paths})
if self.test_suite.startswith('loading.'):
raise ndb.Return({p + '/' + self.test_case.replace(':', '/') + extra
for p in test_paths
for extra in ['', '_' + self.test_case.split(':')[0]]})
poly_case_test_suites = [
'sizes',
'memory.dual_browser_test',
'memory.top_10_mobile',
'v8:runtime_stats.top_25',
]
poly_case_test_suites += yield self._GetConfiguration(
ONE_TWO_TEST_SUITES_KEY, [])
poly_case_test_suites += yield self._GetConfiguration(
TWO_TWO_TEST_SUITES_KEY, [])
if self.test_suite in poly_case_test_suites:
raise ndb.Return({p + '/' + self.test_case.replace(':', '/')
for p in test_paths})
raise ndb.Return({p + '/' + self.test_case for p in test_paths})
def _AppendRef(self, test_paths):
ref_test_paths = set()
for p in test_paths:
# A given test path will only use one of these suffixes, but there's no
# way to know which.
ref_test_paths.add(p + '_ref')
ref_test_paths.add(p + '/ref')
return ref_test_paths
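# A minimal usage sketch (the bot and test path are hypothetical, and a
# populated datastore is assumed for the stored-object configuration lists):
#
#   desc = Descriptor.FromTestPathSync(
#       'ChromiumPerf/some-bot/my_suite/my_metric_avg')
#   # desc.bot == 'ChromiumPerf:some-bot', desc.statistic == 'avg'
#   test_paths = desc.ToTestPathsSync()  # inverse; may yield several paths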
| |
import collections
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.translation import deactivate
if six.PY3:
from types import SimpleNamespace
else:
class SimpleNamespace(object):
pass
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
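# A minimal sketch of the key access ContextList provides (subcontexts shown
# as plain dicts for brevity):
#   contexts = ContextList([{'a': 1}, {'b': 2}])
#   contexts['b']            # -> 2, found by scanning the subcontexts
#   contexts.get('c', None)  # -> None instead of raising KeyError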
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
class _TestState(object):
pass
def setup_test_environment(debug=None):
"""
Perform global pre-test setup, such as installing the instrumented template
renderer and setting the email backend to the locmem email backend.
"""
if hasattr(_TestState, 'saved_data'):
# Executing this function twice would overwrite the saved values.
raise RuntimeError(
"setup_test_environment() was already called and can't be called "
"again without first calling teardown_test_environment()."
)
if debug is None:
debug = settings.DEBUG
saved_data = SimpleNamespace()
_TestState.saved_data = saved_data
saved_data.allowed_hosts = settings.ALLOWED_HOSTS
# Add the default host of the test client.
settings.ALLOWED_HOSTS = list(settings.ALLOWED_HOSTS) + ['testserver']
saved_data.debug = settings.DEBUG
settings.DEBUG = debug
saved_data.email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
saved_data.template_render = Template._render
Template._render = instrumented_test_render
mail.outbox = []
deactivate()
def teardown_test_environment():
"""
Perform any global post-test teardown, such as restoring the original
template renderer and restoring the email sending functions.
"""
saved_data = _TestState.saved_data
settings.ALLOWED_HOSTS = saved_data.allowed_hosts
settings.DEBUG = saved_data.debug
settings.EMAIL_BACKEND = saved_data.email_backend
Template._render = saved_data.template_render
del _TestState.saved_data
del mail.outbox
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, **kwargs):
"""
Create the test databases.
"""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors()
old_names = []
for signature, (db_name, aliases) in test_databases.items():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get('TEST', {}).get('SERIALIZE', True),
)
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(
number=index + 1,
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# Check that no database depends on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases
)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def get_unique_databases_and_mirrors():
"""
Figure out which databases actually need to be created.
    Deduplicate entries in DATABASES that correspond to the same database or are
configured as test mirrors.
Return two values:
- test_databases: ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
- mirrored_aliases: mapping of mirror aliases to original aliases.
"""
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
test_databases = dependency_ordered(test_databases.items(), dependencies)
test_databases = collections.OrderedDict(test_databases)
return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
"""
Destroy all the non-mirror databases.
"""
for connection, old_name, destroy in old_config:
if destroy:
if parallel > 1:
for index in range(parallel):
connection.creation.destroy_test_db(
number=index + 1,
verbosity=verbosity,
keepdb=keepdb,
)
connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class TestContextDecorator(object):
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
decorated_tearDown = cls.tearDown
def setUp(inner_self):
context = self.enable()
if self.attr_name:
setattr(inner_self, self.attr_name, context)
decorated_setUp(inner_self)
def tearDown(inner_self):
decorated_tearDown(inner_self)
self.disable()
cls.setUp = setUp
cls.tearDown = tearDown
return cls
raise TypeError('Can only decorate subclasses of unittest.TestCase')
def decorate_callable(self, func):
@wraps(func, assigned=available_attrs(func))
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError('Cannot decorate object of type %s' % type(decorated))
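# A minimal sketch of a TestContextDecorator subclass (hypothetical, not part
# of Django's API): enable() returns the context that is exposed via attr_name
# on TestCase subclasses or via kwarg_name on decorated functions.
class _example_marker(TestContextDecorator):
    def __init__(self, **kwargs):
        self.entered = False
        super(_example_marker, self).__init__(**kwargs)
    def enable(self):
        # The return value becomes the context handed to the decorated code.
        self.entered = True
        return self
    def disable(self):
        self.entered = False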
class override_settings(TestContextDecorator):
"""
    Acts as either a decorator or a context manager. If it's a decorator, it
    takes a function and returns a wrapped function. If it's a context manager,
    it's used with the ``with`` statement. In either case, entering and exiting
    happen before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
super(override_settings, self).__init__()
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def decorate_class(self, cls):
from django.test import SimpleTestCase
if not issubclass(cls, SimpleTestCase):
raise ValueError(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(cls)
return cls
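# Typical usage sketch (assumes settings are configured; the test class and
# setting values are hypothetical):
#   @override_settings(USE_TZ=False, DEBUG=True)
#   class MyTests(SimpleTestCase):
#       ...
#   with override_settings(USE_TZ=False):
#       ...  # settings.USE_TZ is False only inside this block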
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; accumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
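# Usage sketch (the app label is hypothetical): each keyword maps a list
# setting to a dict of actions, applied in order:
#   @modify_settings(INSTALLED_APPS={
#       'append': 'my_app',
#       'remove': 'django.contrib.admin',
#   })
#   class MyTests(SimpleTestCase):
#       ...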
class override_system_checks(TestContextDecorator):
"""
Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super(override_system_checks, self).__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = self.new_checks
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = self.deployment_checks
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison. Leading and trailing whitespace is ignored on both chunks.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>".
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
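# A sketch of the equivalences compare_xml accepts: attribute order and runs
# of whitespace are normalized, so the following compares equal:
#   compare_xml('<a x="1" y="2">t</a>', '<a y="2" x="1" >t</a>')  # -> True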
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")
def is_quoted_unicode(s):
s = s.strip()
return len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in ('"', "'")
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
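# Usage sketch (assumes a configured 'default' database; the queryset is
# hypothetical). Each captured entry is a dict with 'sql' and 'time' keys:
#   with CaptureQueriesContext(connections['default']) as ctx:
#       list(MyModel.objects.all())
#   assert len(ctx.captured_queries) == 1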
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super(ignore_warnings, self).__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
"""
calls = []
def replacement(msg, *args, **kwargs):
call = msg % args
calls.append((call, kwargs) if log_kwargs else call)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
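# Usage sketch: the patched logger method records formatted messages instead
# of emitting them (logger name and message are arbitrary examples):
#   with patch_logger('django.security', 'warning') as calls:
#       logging.getLogger('django.security').warning('denied: %s', '/admin/')
#   assert calls == ['denied: /admin/']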
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
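# Usage sketch: any code calling time.time() inside the block sees the frozen
# value:
#   with freeze_time(123.0):
#       assert time.time() == 123.0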
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class override_script_prefix(TestContextDecorator):
"""
    Decorator or context manager to temporarily override the script prefix.
"""
def __init__(self, prefix):
self.prefix = prefix
super(override_script_prefix, self).__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super(isolate_apps, self).__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, 'default_apps', apps)
return apps
def disable(self):
setattr(Options, 'default_apps', self.old_apps)
def tag(*tags):
"""
Decorator to add tags to a test class or method.
"""
def decorator(obj):
setattr(obj, 'tags', set(tags))
return obj
return decorator
| |
#!/usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
    Downloads mingw-build repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
        if not isinstance(repo, str):
            repo = repo.decode()
socket.close()
for entry in repo.split('\n')[:-1]:
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
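# The returned mapping is keyed by version tuple, then arch, threading model,
# exception model, and revision number; the leaf is a download url. A sketch
# (all keys and the url are hypothetical):
#   versions[(4, 9, 2)]['x86_64']['posix']['seh'][0] -> 'http://.../....7z'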
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
    if isinstance(path, str):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
    of gcc. Will download the compiler if needed.
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
            keys = list(versions[version][arch].keys())
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
            keys = list(versions[version][arch][threading].keys())
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
        if revision is None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
        if len(version) != 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a three digit version string')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)
| |
# COPYRIGHT 2012, Michael Ball
# Licensed under BSD License (3-clause).
"""This file pulls the sets from from flickr for a given user"""
from __future__ import division
import flickrapi
import math
import sys
# my flickr credentials and info
api_key = '948b85af8b1b9df0a4d38febe7ce75d6'
api_secret = '2a394b9079033cf8'
uid = '64724295@N08'
# basic setup for flickr api - which uses a flickr object for all the work
flickr = flickrapi.FlickrAPI(api_key,api_secret)
sets = flickr.photosets_getList(user_id=uid)
# STUFF FOR THE FUTURE
# Add better error handling / checking to be a decent person.
# Add some tests
# Try actually doing TDD! :O
# Consider command line args and dynamic table shapes.
# THINGS EACH SET NEEDS
# Title and Caption
# if no caption, the resulting value is "None"
# Set Photo and a link to the set photo
# #############################################################
# Class Defs.
class IterRegistry(type):
def __iter__(cls):
return iter(cls._registry)
class flickrset(object):
"""A Simple object to represent sets in a format I like
each set has a title, description and a url, sid, and uid.
A set object will contain a list of dictionaries where each dictionary has the data for each set
"""
_registry = []
__metaclass__ = IterRegistry
def __init__(self):
"""docstring for __init__"""
return None
def new(self, title, description, sid, uid='64724295@N08'):
new_set = {"title":title,"description":description,"sid":sid,"uid":uid,"url":"http://www.flickr.com/photos/" + str(uid) + "/sets/" + str(sid) + "/"}
flickrset._registry.append(new_set)
def title(self,index):
"""returns the set title for a set in the list"""
if index > len(flickrset._registry)-1:
raise IndexError
return flickrset._registry[index]["title"]
def description(self,index):
"""returns the set description"""
if index > len(flickrset._registry)-1:
raise IndexError
return flickrset._registry[index]["description"]
def sid(self,index):
if index > len(flickrset._registry)-1:
raise IndexError
return flickrset._registry[index]["sid"]
def url(self,index):
"""docstring for url"""
if index > len(flickrset._registry)-1:
raise IndexError
return flickrset._registry[index]["url"]
def all(self):
"""docstring for all
Returns the class list of all sets.
"""
return flickrset._registry
def set(self,index):
"""Like the others, but it returns the whole dict"""
if index > len(flickrset._registry)-1:
raise IndexError
return flickrset._registry[index]
# Making the class iterable
def __iter__(self):
"""docstring for __iter__"""
return iter(flickrset._registry)
    def next(self):
        """Return the next set, tracking the position on the instance."""
        index = getattr(self, '_iter_index', 0)
        if index >= len(flickrset._registry):
            raise StopIteration
        ret = flickrset._registry[index]
        self._iter_index = index + 1
        return ret
class photo(object):
"""class to represent a set photo
each photo needs a photo id and a URL
Flikr URL Format:
http://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_[mstzb].jpg
This will be implemented very similarly to the flickrset class which means it will have a list which contains dictionaries
"""
_registry = []
__metaclass__ = IterRegistry
flickr_size = 'q'
    # this is the large square size I'll use for building thumbs.
def __init__(self):
"""docstring for __init__"""
return None
def new(self, pid, sid, farmid, serverid, secret, uid='64724295@N08'):
"""
Doesn't require much, but it does require each photo
to be attached to a set.
"""
new_photo = { "pid":pid, "sid":sid, "url":None, "farmid":farmid,
"serverid":serverid, "secret":secret
}
photo._registry.append(new_photo)
# GETTER METHODS
# All take in an index and return the item requested for the dictionary.
def pid(self,index):
if index > len(photo._registry)-1:
raise IndexError
return photo._registry[index]["pid"]
def url(self,index):
if index > len(photo._registry)-1:
raise IndexError
elif not photo._registry[index]["url"]:
raise ValueError("Requested Value Not Present")
return photo._registry[index]["url"]
def sid(self,index):
"""
        returns the sid of the set to which the photo is connected
"""
if index > len(photo._registry)-1:
raise IndexError
return photo._registry[index]["sid"]
def farmid(self,index):
if index > len(photo._registry)-1:
raise IndexError
elif not photo._registry[index]["farmid"]:
raise ValueError("Requested Value Not Present")
return photo._registry[index]["farmid"]
def secret(self,index):
if index > len(photo._registry)-1:
raise IndexError
elif not photo._registry[index]["secret"]:
raise ValueError("Requested Value Not Present")
return photo._registry[index]["secret"]
def serverid(self,index):
if index > len(photo._registry)-1:
raise IndexError
elif not photo._registry[index]["serverid"]:
raise ValueError("Requested Value Not Present")
return photo._registry[index]["serverid"]
def image(self,index):
"""
Like the others, but it returns the whole dict, based on an index
"""
if index > len(photo._registry)-1:
raise IndexError
return photo._registry[index]
def all(self):
"""
returns all the images
"""
return photo._registry
def find_sid(self,sid):
"""
Returns the dictionary of the image based on the sid
"""
for img in photo._registry:
if img["sid"]==sid:
return img
raise ValueError("No images from for given sid")
# SETTER METHODS
    # Setters take in an index (the image) and whatever values are required.
    # They can also return the value if True is passed as the last arg.
# If I really feel like it, I should have these methods use the getters....
def set_farmid(self,index,fid,ret=False):
"""docstring for set_farmid
Takes in an index, the farmid, and sets it.
        Optionally returns the value too, if the last (optional) arg is True
"""
if index > len(photo._registry)-1:
raise IndexError
else:
photo._registry[index]["farmid"] = fid
if ret:
return fid
def set_serverid(self,index,serverid,ret=False):
"""docstring for set_serverid
Takes in an index, the serverid, and sets it.
        Optionally returns the value too, if the last (optional) arg is True
"""
if index > len(photo._registry)-1:
raise IndexError
else:
photo._registry[index]["serverid"] = serverid
if ret:
return serverid
def set_secret(self,index,secret,ret=False):
"""docstring for set_secret
Takes in an index, the secret, and sets it.
        Optionally returns the value too, if the last (optional) arg is True
"""
if index > len(photo._registry)-1:
raise IndexError
else:
photo._registry[index]["secret"] = secret
if ret:
return secret
def set_url(self,index,ret=False):
"""docstring for set_farmid
Takes in an index, and sets the URL based on existing variables.
A class size, farmid,serverid,photoid,and secret are REQUIRED
for the URL to work.
        Optionally returns the value too, if the last (optional) arg is True
http://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_[mstzb].jpg
"""
if index > len(photo._registry)-1:
raise IndexError
img = photo._registry[index]
if not photo.flickr_size or not img["farmid"] or not img["serverid"] \
or not img["secret"] or not img["pid"]:
raise ValueError("One or more of the parameters if missing for \
the image: Farmid, Serverid, Secret, Photo ID,or Class Size. \n\
The URL could no be set.")
else:
new_url = ("http://farm" + img["farmid"] + ".staticflickr.com/"
+ img["serverid"] + "/" + img["pid"] + "_" + img["secret"]
+ "_" + photo.flickr_size + ".jpg") #PHEW.......
img["url"] = new_url
if ret:
return img["url"]
# Making the class iterable
def __iter__(self):
"""docstring for __iter__"""
return iter(photo._registry)
# ############################################################################
# now here is where I need to do the work to set up a list of sets
# Create Empty sets and empty images
all_sets = flickrset()
all_images = photo()
# get all sets and put them in the new class
for item in sets.find('photosets').findall('photoset'):
# Set init vals: UID, Title, Description, SID
exclude = ['Explored!', 'High School', 'Photos for Jill', 'Tumblr Images',
'New Zealand Trip 2011']
t = item.find('title').text
if t not in exclude: # exclude sets in the list of ones I don't want.
all_sets.new(t,item.find('description').text, item.attrib['id'])
# get the first image for each set and add it to the list
for s in all_sets:
sid = s["sid"]
cur_set = flickr.photosets_getPhotos(api_key=api_key,photoset_id=sid)
# set the current image to the primary image of each set
# img[0].keys()
# ['originalsecret', 'isfavorite', 'license', 'views', 'farm', 'media', 'server', 'dateuploaded', 'secret', 'safety_level', 'originalformat', 'rotation', 'id']
cur_img = cur_set[0].attrib["primary"]
img = flickr.photos_getInfo(api_key=api_key,photo_id=cur_img)[0]
# def new(self, pid, sid, farmid, serverid, secret, uid='64724295@N08'):
all_images.new(
cur_img,sid,img.attrib['farm'],
img.attrib['server'],
img.attrib['secret']
)
for i in range(len(all_images._registry)):
all_images.set_url(i)
# ####################################################################
# Setup and gathering data done. Now to construct the HTML
# HTML Tags
# tags generally follow the structure of the html name followed by
# b for begin and e for end
# trb: either none or 'photoslinks' for link rows
# img: {0} the class ('pho2'), {1} the image URL, {2} the set title (alt text)
# divb: either setImg or caption
# a: 0 link to set and then 1 set title
# p: set description
tags = {
"tbegin":"""<table style="text-align: middle; margin-right:auto; \
margin-left: auto;">\n<tbody>""",
"tend":"""</tbody>\n</table>""",
"tre":"""</tr>""",
"trb":"""<tr class="{0}">""",
"tdb":"""<td>""",
"img":"""<img class="{0}" src="{1}" alt="{2}" />""",
"divb":"""<div class="{0}">""",
"dive":"""</div>""",
"a":"""<a href="{0}">{1}</a>""",
"p":"""<p>{0}</p>""",
"tde":"""</td>"""
}
# This needs serious refactoring....
def build_table():
"""Return a string which contains the HTML for the table."""
global tags
n = "\n"
table = """"""
table += tags["tbegin"] + n
num = len(all_sets._registry)
rows = int(math.ceil(num/3))
for i in range(1,rows+1):
set1 = all_sets.set(i*3-3) if (i*3-3)<num else None
set2 = all_sets.set(i*3-2) if (i*3-2)<num else None
set3 = None
# set3 = all_sets.set(i*3-1) if (i*3-1)<num else None
img1 = all_images.find_sid(set1["sid"]) if set1 else None
img2 = all_images.find_sid(set2["sid"]) if set2 else None
img3 = all_images.find_sid(set3["sid"]) if set3 else None
# handle sets rows with images
table += tags["trb"].format("") + n
if img1:
table += tags["tdb"] + n
table += tags['divb'].format("setImg") + n
table += tags['img'].format("pho2",img1['url'],set1['title']) + n
table += tags['a'].format(set1['url'],
tags['divb'].format("caption") + n \
+ tags['p'].format(set1['description']) + n + tags['dive'] + n)
table += tags['dive'] + n
table += tags["tde"] + n
if img2:
table += tags["tdb"] + n
table += tags['divb'].format("setImg") + n
table += tags['img'].format("pho2",img2['url'],set2['title']) + n
table += tags['a'].format(set2['url'],
tags['divb'].format("caption") + n \
+ tags['p'].format(set2['description']) + n + tags['dive'] + n)
table += tags['dive'] + n
table += tags["tde"] + n
if img3:
table += tags["tdb"] + n
table += tags['divb'].format("setImg") + n
table += tags['img'].format("pho2",img3['url'],set3['title']) + n
table += tags['a'].format(set3['url'],
tags['divb'].format("caption") + n \
+ tags['p'].format(set3['description']) + n + tags['dive'] + n)
table += tags['dive'] + n
table += tags["tde"] + n
table += tags["tre"] + n
# handle links rows
table += tags['trb'].format("photoslinks") + n
if img1:
table += tags["tdb"] + n
table += tags['a'].format(set1['url'],set1['title']) + n
table += tags["tde"] + n
if img2:
table += tags["tdb"] + n
table += tags['a'].format(set2['url'],set2['title']) + n
table += tags["tde"] + n
if img3:
table += tags["tdb"] + n
table += tags['a'].format(set3['url'],set3['title']) + n
table += tags["tde"] + n
table += tags['tre'] + n
table += tags['tend'] + n + """<br /> <br />"""
return table
precursor = """<p>Here's the place where you'll be able to find links to all of
my photos that are online. This is a bit of a small set right now, but please
check it out! Images are organized by types and by places where they were
taken, and everything is in alphabetical order. :)</p> \n
"""
sys.stdout.write(precursor + build_table())
| |
import pickle
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.utils._encode import _unique
from sklearn.utils._encode import _encode
from sklearn.utils._encode import _check_unknown
@pytest.mark.parametrize(
"values, expected",
[(np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array([1, 2, 3], dtype='int64')),
(np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object)),
(np.array(['b', 'a', 'c', 'a', 'c']),
np.array(['a', 'b', 'c']))],
ids=['int64', 'object', 'str'])
def test_encode_util(values, expected):
uniques = _unique(values)
assert_array_equal(uniques, expected)
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))
def test_encode_with_check_unknown():
# test for the check_unknown parameter of _encode()
uniques = np.array([1, 2, 3])
values = np.array([1, 2, 3, 4])
# Default is True, raise error
with pytest.raises(ValueError,
match='y contains previously unseen labels'):
_encode(values, uniques=uniques, check_unknown=True)
    # don't raise an error when check_unknown=False
_encode(values, uniques=uniques, check_unknown=False)
    # check_unknown is effectively ignored for object dtype: the check always runs
uniques = np.array(['a', 'b', 'c'], dtype=object)
values = np.array(['a', 'b', 'c', 'd'], dtype=object)
with pytest.raises(ValueError,
match='y contains previously unseen labels'):
_encode(values, uniques=uniques, check_unknown=False)
def _assert_check_unknown(values, uniques, expected_diff, expected_mask):
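    # Exercise both call paths: plain diff, and diff plus validity mask.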
diff = _check_unknown(values, uniques)
assert_array_equal(diff, expected_diff)
diff, valid_mask = _check_unknown(values, uniques, return_mask=True)
assert_array_equal(diff, expected_diff)
assert_array_equal(valid_mask, expected_mask)
@pytest.mark.parametrize("values, uniques, expected_diff, expected_mask", [
(np.array([1, 2, 3, 4]),
np.array([1, 2, 3]),
[4],
[True, True, True, False]),
(np.array([2, 1, 4, 5]),
np.array([2, 5, 1]),
[4],
[True, True, False, True]),
(np.array([2, 1, np.nan]),
np.array([2, 5, 1]),
[np.nan],
[True, True, False]),
(np.array([2, 1, 4, np.nan]),
np.array([2, 5, 1, np.nan]),
[4],
[True, True, False, True]),
(np.array([2, 1, 4, np.nan]),
np.array([2, 5, 1]),
[4, np.nan],
[True, True, False, False]),
(np.array([2, 1, 4, 5]),
np.array([2, 5, 1, np.nan]),
[4],
[True, True, False, True]),
(np.array(['a', 'b', 'c', 'd'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
np.array(['d'], dtype=object),
[True, True, True, False]),
(np.array(['d', 'c', 'a', 'b'], dtype=object),
np.array(['a', 'c', 'b'], dtype=object),
np.array(['d'], dtype=object),
[False, True, True, True]),
(np.array(['a', 'b', 'c', 'd']),
np.array(['a', 'b', 'c']),
np.array(['d']),
[True, True, True, False]),
(np.array(['d', 'c', 'a', 'b']),
np.array(['a', 'c', 'b']),
np.array(['d']),
[False, True, True, True]),
])
def test_check_unknown(values, uniques, expected_diff, expected_mask):
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [None, np.nan, float('nan')])
@pytest.mark.parametrize('pickle_uniques', [True, False])
def test_check_unknown_missing_values(missing_value, pickle_uniques):
# check for check_unknown with missing values with object dtypes
values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object)
uniques = np.array(['c', 'a', 'b', missing_value], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = ['d']
expected_mask = [False, True, True, True, True]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object)
uniques = np.array(['c', 'a', 'b'], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = ['d', missing_value]
expected_mask = [False, True, True, True, False]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
values = np.array(['a', missing_value], dtype=object)
uniques = np.array(['a', 'b', 'z'], dtype=object)
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
expected_diff = [missing_value]
expected_mask = [True, False]
_assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize('missing_value', [np.nan, None, float('nan')])
@pytest.mark.parametrize('pickle_uniques', [True, False])
def test_unique_util_missing_values_objects(missing_value, pickle_uniques):
# check for _unique and _encode with missing values with object dtypes
values = np.array(['a', 'c', 'c', missing_value, 'b'], dtype=object)
expected_uniques = np.array(['a', 'b', 'c', missing_value], dtype=object)
uniques = _unique(values)
if missing_value is None:
assert_array_equal(uniques, expected_uniques)
else: # missing_value == np.nan
assert_array_equal(uniques[:-1], expected_uniques[:-1])
assert np.isnan(uniques[-1])
if pickle_uniques:
uniques = pickle.loads(pickle.dumps(uniques))
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, np.array([0, 2, 2, 3, 1]))
def test_unique_util_missing_values_numeric():
# Check missing values in numerical values
values = np.array([3, 1, np.nan, 5, 3, np.nan], dtype=float)
expected_uniques = np.array([1, 3, 5, np.nan], dtype=float)
expected_inverse = np.array([1, 0, 3, 2, 1, 3])
uniques = _unique(values)
assert_array_equal(uniques, expected_uniques)
uniques, inverse = _unique(values, return_inverse=True)
assert_array_equal(uniques, expected_uniques)
assert_array_equal(inverse, expected_inverse)
encoded = _encode(values, uniques=uniques)
assert_array_equal(encoded, expected_inverse)
def test_unique_util_with_all_missing_values():
# test for all types of missing values for object dtype
values = np.array([np.nan, 'a', 'c', 'c', None, float('nan'),
None], dtype=object)
uniques = _unique(values)
assert_array_equal(uniques[:-1], ['a', 'c', None])
# last value is nan
assert np.isnan(uniques[-1])
expected_inverse = [3, 0, 1, 1, 2, 3, 2]
_, inverse = _unique(values, return_inverse=True)
assert_array_equal(inverse, expected_inverse)
def test_check_unknown_with_both_missing_values():
# test for both types of missing values for object dtype
values = np.array([np.nan, 'a', 'c', 'c', None, np.nan,
None], dtype=object)
diff = _check_unknown(values,
known_values=np.array(['a', 'c'], dtype=object))
assert diff[0] is None
assert np.isnan(diff[1])
diff, valid_mask = _check_unknown(
values, known_values=np.array(['a', 'c'], dtype=object),
return_mask=True)
assert diff[0] is None
assert np.isnan(diff[1])
assert_array_equal(valid_mask,
[False, True, True, True, False, False, False])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""system.py Unit Tests
.. moduleauthor:: Timothy Helton <timothy.j.helton@gmail.com>
"""
import logging
import os
import os.path as osp
import shutil
import subprocess
import pytest
import numpy as np
from strumenti.tests.fixtures import \
ChromalogLogCapture
from strumenti import system
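# Tab-delimited sample data shared by the file-based tests; the blank second
# line exercises header_row handling in get_header.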
lines = ['a\tb\tc\td\n', '\n', '1\t2\t3\t4\n', '5\t6\t7\t8\n']
# Test check_list
check_list = {'string': ('test', ['test']),
'tuple': (('test', 'tuple'), ['test', 'tuple']),
'list': (['test', 'list'], ['test', 'list']),
}
@pytest.mark.parametrize('variable, expected',
list(check_list.values()),
ids=list(check_list.keys()))
def test__check_list(variable, expected):
assert system.check_list(variable) == expected
# Test get_header
get_header = {'defaults': ({'path': 'test.txt', 'header_row': 0},
['a', 'b', 'c', 'd']),
'row 2': ({'path': 'test.txt', 'header_row': 2},
['1', '2', '3', '4']),
'row 2 str': ({'path': 'test.txt', 'header_row': '2'},
['1', '2', '3', '4']),
}
@pytest.fixture()
def fixture_get_header(tmpdir):
tmpdir.chdir()
with open('test.txt', 'w') as f:
f.write(''.join(lines))
with open('test_no_header.txt', 'w') as f:
f.write(''.join(lines[2:]))
@pytest.mark.usefixtures('fixture_get_header')
@pytest.mark.parametrize('kwargs, expected',
list(get_header.values()),
ids=list(get_header.keys()))
def test__get_header(kwargs, expected):
assert system.get_header(**kwargs) == expected
# Test flatten
flatten = {'lists ints floats': ([[1, 2, 3], [4, 5, 6], [7., 8., 9.]],
[1, 2, 3, 4, 5, 6, 7, 8, 9]),
'lists str': ([['this'], ['is'], ['a'], ['test']],
['this', 'is', 'a', 'test']),
'list int str': ([[1, 2, 3], 4, 'test'], [1, 2, 3, 4, 'test']),
'lists empty': ([[1, 2, 3], [], [7, 8, 9]], [1, 2, 3, 7, 8, 9]),
'tuples floats': ([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
[1, 2, 3, 4, 5, 6, 7, 8, 9]),
}
@pytest.mark.parametrize('matrix, expected',
list(flatten.values()),
ids=list(flatten.keys()))
def test__flatten(matrix, expected):
assert system.flatten(matrix) == expected
def test__flatten_empty():
with pytest.raises(TypeError):
system.flatten()
# Test logger_setup
output = (('test', 'DEBUG', 'debug'),
('test', 'INFO', 'info'),
('test', 'WARNING', 'warning'),
('test', 'ERROR', 'error'),
('test', 'CRITICAL', 'critical'))
logger_setup = {'debug': ({'name': 'test', 'master_level': logging.DEBUG},
output),
'info': ({'name': 'test', 'master_level': logging.INFO},
output[1:]),
'warning': ({'name': 'test', 'master_level': logging.WARNING},
output[2:]),
'error': ({'name': 'test', 'master_level': logging.ERROR},
output[3:]),
'critical': ({'name': 'test',
'master_level': logging.CRITICAL},
output[-1:]),
'file': ({'log_file': 'test.log', 'name': 'test_file',
'master_level': logging.CRITICAL},
(('test_file', 'CRITICAL', 'critical'),))
}
@pytest.mark.parametrize('kwargs, expected',
list(logger_setup.values()),
ids=list(logger_setup.keys()))
def test__logger_setup(tmpdir, kwargs, expected):
tmpdir.chdir()
with ChromalogLogCapture() as log_cap:
logger = system.logger_setup(**kwargs)
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.critical('critical')
log_cap.filter_records()
log_cap.check(*expected)
if 'log_file' in kwargs.keys():
assert osp.isfile('test.log')
# Test load_file
load_file = {'lines': ({'path': 'test.txt'},
['line one\n', 'line two\n', 'line three\n']),
'str': ({'path': 'test.txt', 'all_lines': False},
'line one\nline two\nline three\n'),
'first n lines': ({'path': 'test.txt', 'all_lines': False,
'first_n_lines': 2},
['line one\n', 'line two\n']),
}
@pytest.fixture(scope='session')
def load_file_setup():
file_name = 'test.txt'
with open(file_name, 'w') as f:
f.write('line one\n')
f.write('line two\n')
f.write('line three\n')
return file_name
@pytest.mark.usefixtures('load_file_setup')
@pytest.mark.parametrize('kwargs, expected',
list(load_file.values()),
ids=list(load_file.keys()))
def test__load_file(kwargs, expected):
actual = system.load_file(**kwargs)
assert actual == expected
# Test load_record
load_record = {'header': ({'path': 'test.txt', 'header_row': 0,
'skip_rows': 2},
'a', np.array([1.0, 5.0]),
'd', np.array([4.0, 8.0])),
'header some cols': ({'path': 'test.txt', 'header_row': 0,
'skip_rows': 2, 'cols': (0, 3)},
'a', np.array([1.0, 5.0]),
'd', np.array([4.0, 8.0])),
'header formats': ({'path': 'test.txt', 'header_row': 0,
'skip_rows': 2,
'formats': ('f8', 'i4', 'f8', 'i4')},
'a', np.array([1.0, 5.0]),
'd', np.array([4, 8])),
'no header some cols': ({'path': 'test_no_header.txt',
'cols': (0, 3)},
'0', np.array([1.0, 5.0]),
'3', np.array([4.0, 8.0])),
'no header formats': ({'path': 'test_no_header.txt',
'names': ('one', 'two', 'three',
'four')},
'one', np.array([1.0, 5.0]),
'four', np.array([4.0, 8.0])),
}
@pytest.fixture()
def load_record_setup():
with open('test.txt', 'w') as f:
f.write(''.join(lines))
with open('test_no_header.txt', 'w') as f:
f.write(''.join(lines[2:]))
@pytest.mark.usefixtures('load_record_setup')
@pytest.mark.parametrize(('kwargs, a_key, a_expect, d_key, d_expect'),
list(load_record.values()),
ids=list(load_record.keys()))
def test__load_records(kwargs, a_key, a_expect, d_key, d_expect):
output = system.load_records(**kwargs)
assert np.all(output[a_key] == a_expect)
assert np.all(output[d_key] == d_expect)
# Test preserve_cwd
@pytest.fixture()
def preserve_cwd_setup(request):
original_dir = os.getcwd()
working_dir = osp.join(original_dir, 'junk')
file_name = 'junk.txt'
os.makedirs(working_dir, exist_ok=True)
def teardown():
shutil.rmtree(working_dir)
request.addfinalizer(teardown)
return {'original_dir': original_dir, 'working_dir': working_dir,
'file_name': file_name}
def test__preserve_cwd(preserve_cwd_setup):
@system.preserve_cwd(preserve_cwd_setup['working_dir'])
def test():
with open(preserve_cwd_setup['file_name'], 'w') as f:
f.close()
test()
assert osp.isfile(osp.join(preserve_cwd_setup['working_dir'],
preserve_cwd_setup['file_name']))
assert os.getcwd() == preserve_cwd_setup['original_dir']
# Test status
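# system.status is a decorator that prints an 'Execute:' banner before the
# wrapped call and a 'Completed: ... (runtime: ...)' banner after it.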
def test__status(capsys):
@system.status()
def print_num():
print('1, 2, 3')
print_num()
out, err = capsys.readouterr()
assert out.split()[:-1] == ['Execute:', 'print_num', '1,', '2,', '3',
'Completed:', 'print_num', '(runtime:']
# Test unzip
@pytest.fixture(scope='function')
def unzip_setup(request):
file_name = 'junk.txt'
with open(file_name, 'w') as f:
f.write('Test file')
subprocess.call(['gzip', file_name])
def teardown():
os.remove(file_name)
request.addfinalizer(teardown)
return file_name
def test__unzip(unzip_setup):
system.unzip_file('{}.gz'.format(unzip_setup))
with open(unzip_setup, 'r') as f:
text = f.read()
assert 'Test file' == text
# Test walk_dir
class TestWalkDir:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
tmpdir.chdir()
self.main_dir = osp.join(os.getcwd(), 'test_walk_dir')
self.extra_dir = osp.join(self.main_dir, 'extra')
os.makedirs(self.extra_dir, exist_ok=True)
@system.preserve_cwd(self.main_dir)
def make_files_1():
with open('main.png', 'w') as f:
f.write('.png file in main directory.')
with open('main.jpeg', 'w') as f:
f.write('.jpeg file in main directory.')
@system.preserve_cwd(self.extra_dir)
def make_files_2():
with open('extra.png', 'w') as f:
f.write('.png file in extra directory.')
with open('extra.inp', 'w') as f:
f.write('.inp file in extra directory.')
os.chdir(self.main_dir)
make_files_1()
make_files_2()
def test__no_files_to_find(self):
assert system.walk_dir('.txt') == []
def test__find_main_dir_only(self):
assert system.walk_dir('.jpeg') == [osp.join(self.main_dir,
'main.jpeg')]
def test__find_extra_dir_only(self):
assert (system.walk_dir('.inp') ==
[osp.join(self.main_dir, self.extra_dir, 'extra.inp')])
def test__find_both_dirs(self):
assert (system.walk_dir('.png') ==
[osp.join(self.main_dir, self.extra_dir, 'extra.png'),
osp.join(self.main_dir, 'main.png')])
# Test zip_file
@pytest.fixture(scope='function')
def zip_setup(tmpdir):
tmpdir.chdir()
file_name = 'junk.txt'
with open(file_name, 'w') as f:
f.write('Test file')
return file_name
def test__zip_file(zip_setup):
system.zip_file(zip_setup)
subprocess.call(['gunzip', '{}.gz'.format(zip_setup)])
with open(zip_setup, 'r') as f:
text = f.read()
assert 'Test file' == text
import pytest
from diofant import (I, Matrix, MutableDenseMatrix, MutableSparseMatrix,
PurePoly, Rational, ShapeError, SparseMatrix, eye, ones,
zeros)
from diofant.abc import x, y, z
__all__ = ()
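# One large test below walks through most SparseMatrix behaviour; the inline
# comments mark the individual features (creation, multiplication, power,
# slicing, decompositions, ...) under test.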
def test_sparse_matrix():
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
# creation args
pytest.raises(TypeError, lambda: SparseMatrix(1, 2))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, (1, 3, 4, 5, 6)))
a = SparseMatrix((
(1, 0),
(0, 1)))
assert SparseMatrix(a) == a
a = MutableSparseMatrix([])
b = MutableDenseMatrix([1, 2])
assert a.row_join(b) == b
assert a.col_join(b) == b
assert type(a.row_join(b)) == type(a)
assert type(a.col_join(b)) == type(a)
# test element assignment
a = SparseMatrix((
(1, 0),
(0, 1)))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)))
assert a._smat == {(0, 0): 2, (1, 0): 5}
# test_multiplication
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6)))
b = SparseMatrix((
(1, 2),
(3, 0)))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
c = b * x
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
# test_power
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
# test_creation
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
del S[1, :]
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
del S[:, 1]
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
S.row_swap(0, 1)
assert S == SparseMatrix([[1, 0, 0],
[0, 1, 0],
[2, 0, 1]])
S.col_swap(0, 1)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
S.row_swap(0, 2)
assert S == SparseMatrix([[0, 2, 1],
[1, 0, 0],
[0, 1, 0]])
S.col_swap(0, 2)
assert S == SparseMatrix([[1, 2, 0],
[0, 0, 1],
[0, 1, 0]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
del a[0, :]
assert a == SparseMatrix(0, 2, [])
del b[:, 1]
assert b == SparseMatrix(1, 1, [1])
# test_determinant
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(((1, 1, 1),
(1, 2, 3),
(1, 3, 6))).det() == 1
assert SparseMatrix((( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4))).det() == -289
assert SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16))).det() == 0
assert SparseMatrix(((3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3))).det() == 275
assert SparseMatrix(((1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6))).det() == -55
assert SparseMatrix(((-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1))).det() == 11664
assert SparseMatrix((( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1))).det() == 123
# test_slicing
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]])[[1], [1]] == Matrix([[4]])
# test_submatrix_assignment
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
# test_reshape
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
# test_applyfunc
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0) == sparse_zeros(3)
# test_LUdecomp
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == sparse_zeros(3)
# test_LUsolve
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
B = SparseMatrix(3, 1, [3, 7, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
B = SparseMatrix(3, 1, [-1, 2, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
# test_inverse
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method='CH') == sparse_eye(4)
assert A.inv(method='LDL') == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
# test_cross
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
# conjugate
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]])
# mul
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]])
assert a*0 == Matrix([[0, 0], [0, 0]])
# col join
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]])
A = SparseMatrix(ones(3))
B = eye(3)
assert A.col_join(B) == Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# row join
A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
B = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
assert A.row_join(B) == Matrix([[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
# symmetric
assert not a.is_symmetric(simplify=False)
assert sparse_eye(3).is_symmetric(simplify=False)
# test_cofactor
assert sparse_eye(3) == sparse_eye(3).cofactorMatrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
# test_jacobian
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
# test_QR
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
# test nullspace
# first test reduced row-ech form
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, _ = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, _ = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# test eigen
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
# test values
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals) == [-1, 1, 2]
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
A = SparseMatrix(10, 10, {(0, 0): 18, (0, 9): 12, (1, 4): 18, (2, 7): 16, (3, 9): 12, (4, 2): 19, (5, 7): 16, (6, 2): 12, (9, 7): 18})
assert A.row_list() == [(0, 0, 18), (0, 9, 12), (1, 4, 18), (2, 7, 16), (3, 9, 12), (4, 2, 19), (5, 7, 16), (6, 2, 12), (9, 7, 18)]
assert A.col_list() == [(0, 0, 18), (4, 2, 19), (6, 2, 12), (1, 4, 18), (2, 7, 16), (5, 7, 16), (9, 7, 18), (0, 9, 12), (3, 9, 12)]
assert SparseMatrix.eye(2).nnz() == 2
M = SparseMatrix.eye(3)*2
M[1, 0] = -1
M.col_op(1, lambda v, i: v + 2*M[i, 0])
assert M == Matrix([[ 2, 4, 0], [-1, 0, 0], [ 0, 0, 2]])
M = SparseMatrix.zeros(3)
M.fill(1)
assert M == ones(3)
assert SparseMatrix(ones(0, 3)).tolist() == []
def test_eq():
A = SparseMatrix(((1, 2), (3, 4)))
assert A != 1
assert A != zeros(2, 1)
def test_transpose():
assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
SparseMatrix(((1, 3), (2, 4)))
def test_trace():
assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
SparseMatrix(((1, 1), (1, 1)))
a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
assert len(a._smat) + len(b._smat) - len((a + b)._smat) > 0
def test_errors():
pytest.raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, 1))
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
pytest.raises(TypeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_list([0, 1], set()))
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
pytest.raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
pytest.raises(ShapeError,
lambda: SparseMatrix(1, 2,
[1, 2]) + SparseMatrix(2, 1, [2, 1]))
pytest.raises(IndexError, lambda: SparseMatrix([1, 2, 3])[3, 0])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).applyfunc(1))
pytest.raises(ValueError, lambda: SparseMatrix([1, 2, 3]).reshape(2, 2))
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).cholesky())
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).LDLdecomposition())
pytest.raises(ValueError, lambda: SparseMatrix([[2, 3], [4, 1]]).add(1))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).row_join(Matrix([[1, 2]])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).col_join(Matrix([1, 2])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_matrix([1, 0],
Matrix([1, 2])))
def test_len():
assert not SparseMatrix()
assert SparseMatrix() == SparseMatrix([])
assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
assert len(SparseMatrix.eye(3)._smat) == 3
assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
s = SparseMatrix(3, 3, {})
s[1, 0] = 1
assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == SparseMatrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == SparseMatrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == SparseMatrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == SparseMatrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
assert A.cholesky() == Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
assert A.cholesky() * A.cholesky().T == Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L, D = A.LDLdecomposition()
assert 15*L == Matrix([
[15, 0, 0],
[ 9, 15, 0],
[-3, 5, 15]])
assert D == Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
assert L * D * L.T == A
A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
assert A.inv() * A == SparseMatrix(eye(3))
A = SparseMatrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, 0, 2]])
ans = SparseMatrix([
[Rational(2, 3), Rational(1, 3), Rational(1, 6)],
[Rational(1, 3), Rational(2, 3), Rational(1, 3)],
[ 0, 0, Rational(1, 2)]])
assert A.inv(method='CH') == ans
assert A.inv(method='LDL') == ans
assert A * ans == SparseMatrix(eye(3))
s = A.solve(A[:, 0], 'LDL')
assert A*s == A[:, 0]
s = A.solve(A[:, 0], 'CH')
assert A*s == A[:, 0]
A = A.col_join(A)
s = A.solve_least_squares(A[:, 0], 'CH')
assert A*s == A[:, 0]
s = A.solve_least_squares(A[:, 0], 'LDL')
assert A*s == A[:, 0]
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0, 1],
[0, 0, 1]]).solve([1, 1]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0], [0, 0],
[2, 1]]).solve([1, 1, 1]))
def test_hermitian():
a = SparseMatrix([[0, I], [-I, 0]])
assert a.is_hermitian
a = SparseMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a[0, 0] = 2*I
assert a.is_hermitian is False
a[0, 0] = x
assert a.is_hermitian is None
a[0, 1] = a[1, 0]*I
assert a.is_hermitian is False
def test_fill():
a = SparseMatrix([[0, I], [-I, 0]])
a.fill(0)
assert a == Matrix([[0, 0], [0, 0]])
# Copyright 2013 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OCCI Command Line Client
"""
from __future__ import print_function
import argparse
import logging
import sys
import pyocci
from pyocci import client
from pyocci import exceptions
from pyocci import utils
from pyocci.v1_1 import shell as shell_v1_1
DEFAULT_OCCI_API_VERSION = "1.1"
logger = logging.getLogger(__name__)
class OcciArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(OcciArgumentParser, self).__init__(*args, **kwargs)
class OcciShell(object):
def get_parser(self):
parser = OcciArgumentParser(
prog="pyocci",
description=__doc__.strip(),
epilog=("Run 'pyocci help COMMAND' "
"for help on an specific command."),
add_help=False,
)
# Global arguments
parser.add_argument(
'-h', '--help',
action='store_true',
help=argparse.SUPPRESS,
)
parser.add_argument(
'--version',
action='version',
version=pyocci.__version__
)
parser.add_argument(
'--debug',
default=False,
action='store_true',
help="Print debugging output"
)
# API versioning
parser.add_argument(
'--occi-api-version',
metavar='<occi-api-ver>',
default=utils.env('OCCI_API_VERSION',
default=DEFAULT_OCCI_API_VERSION),
help='Accepts 1.1, defaults to env[OCCI_API_VERSION].'
)
# Connection arguments
parser.add_argument(
'--endpoint-url',
default=utils.env('OCCI_ENDPOINT_URL'),
help='Defaults to env[OCCI_ENDPOINT_URL].'
)
parser.add_argument(
'--occi-cacert',
metavar='<ca-certificate>',
default=utils.env('OCCI_CACERT', default=None),
help='Specify a CA bundle file to use in '
'verifying a TLS (https) server certificate. '
'Defaults to env[OCCI_CACERT]')
parser.add_argument(
'--insecure',
default=utils.env('OCCI_INSECURE', default=False),
action='store_true',
help="Explicitly allow pyocci to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution.")
# Authentication options
parser.add_argument(
"--auth-type",
default="voms",
help=("One of %s. Defaults to 'voms'" %
client.HTTPClient.auth_methods.keys())
)
parser.add_argument(
"--occi-username",
default=utils.env("OCCI_USERNAME"),
help="Defaults to env[OCCI_USERNAME]"
)
parser.add_argument(
"--occi-password",
default=utils.env("OCCI_PASSWORD"),
help="Defaults to env[OCCI_PASSWORD]"
)
parser.add_argument(
"--occi-group",
default=utils.env("OCCI_GROUP"),
help="Defaults to env[OCCI_GROUP]"
)
parser.add_argument(
"--x509-user-proxy",
default=utils.env("X509_USER_PROXY"),
help="Defaults to env[X509_USER_PROXY]"
)
return parser
def get_subcommand_parser(self, version):
parser = self.get_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
try:
actions_module = {
'1.1': shell_v1_1,
}[version]
except KeyError:
actions_module = shell_v1_1
self._find_actions(subparsers, actions_module)
self._find_actions(subparsers, self)
return parser
def _find_actions(self, subparsers, actions_module):
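        # Expose every `do_*` attribute of the module or object as an argparse
        # subcommand, attaching any arguments declared via the @utils.arg
        # decorator to its subparser.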
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
            # Use hyphen-separated command names instead of underscores.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
action_help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(
command,
help=action_help,
description=desc,
add_help=False,
)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""
Display help about this program or one of its subcommands.
"""
if args.command:
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exceptions.CommandError(
"'%s' is not a valid subcommand" % args.command
)
else:
self.parser.print_help()
def setup_debugging(self, debug):
if not debug:
return
streamhandler = logging.StreamHandler()
streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
streamhandler.setFormatter(logging.Formatter(streamformat))
logger.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
def main(self, argv):
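        # Parse the global options first so debug logging can be enabled
        # before the version-specific subcommand parser is built.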
parser = self.get_parser()
(options, args) = parser.parse_known_args(argv)
self.setup_debugging(options.debug)
subcommand_parser = self.get_subcommand_parser(
options.occi_api_version)
self.parser = subcommand_parser
if options.help or not argv:
subcommand_parser.print_help()
return 0
args = subcommand_parser.parse_args(argv)
if args.func == self.do_help:
self.do_help(args)
return 0
(
endpoint_url,
auth_type,
username,
password,
group,
x509_user_proxy,
insecure,
) = (
args.endpoint_url,
args.auth_type,
args.occi_username,
args.occi_password,
args.occi_group,
args.x509_user_proxy,
args.insecure,
)
if not endpoint_url:
raise exceptions.CommandError("You must provide and endpoint url "
"via either --endpoint_url or "
"env[OCCI_ENDPOINT_URL]")
if auth_type not in client.HTTPClient.auth_methods.keys():
raise exceptions.CommandError(
"Specified 'auth_type' not supported, provided '%s', expected "
"one of %s" % (auth_type,
client.HTTPClient.auth_methods.keys())
)
if auth_type == "voms" and not x509_user_proxy:
raise exceptions.CommandError(
"If you are using VOMS authentication you must provide a valid"
" proxy file via either --x509_user_proxy or "
"env[X509_USER_PROXY]"
)
self.cs = client.Client(
options.occi_api_version,
endpoint_url,
auth_type,
username=username,
password=password,
group=group,
x509_user_proxy=x509_user_proxy,
http_log_debug=options.debug,
insecure=insecure,
)
args.func(self.cs, args)
def main():
try:
OcciShell().main(sys.argv[1:])
    except Exception as e:
logger.debug(e, exc_info=1)
print("ERROR: %s" % unicode(e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
# -*- coding: utf-8 -*-
"""
Unit tests for the spike_train_generation module.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import unittest
import os
import warnings
import neo
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.stats import kstest, expon
from quantities import ms, second, Hz, kHz, mV, dimensionless
import elephant.spike_train_generation as stgen
from elephant.statistics import isi
def pdiff(a, b):
"""Difference between a and b as a fraction of a
i.e. abs((a - b)/a)
"""
return abs((a - b)/a)
class AnalogSignalThresholdDetectionTestCase(unittest.TestCase):
def setUp(self):
pass
def test_threshold_detection(self):
# Test whether spikes are extracted at the correct times from
# an analog signal.
# Load membrane potential simulated using Brian2
# according to make_spike_extraction_test_data.py.
curr_dir = os.path.dirname(os.path.realpath(__file__))
npz_file_loc = os.path.join(curr_dir,'spike_extraction_test_data.npz')
iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
data = iom2.read()
vm = data[0].segments[0].analogsignals[0]
spike_train = stgen.threshold_detection(vm)
try:
len(spike_train)
except TypeError: # Handles an error in Neo related to some zero length
# spike trains being treated as unsized objects.
warnings.warn(("The spike train may be an unsized object. This may be related "
"to an issue in Neo with some zero-length SpikeTrain objects. "
"Bypassing this by creating an empty SpikeTrain object."))
spike_train = neo.core.SpikeTrain([],t_start=spike_train.t_start,
t_stop=spike_train.t_stop,
units=spike_train.units)
# Correct values determined previously.
true_spike_train = [0.0123, 0.0354, 0.0712, 0.1191,
0.1694, 0.22, 0.2711]
        # Does threshold_detection give the correct number of spikes?
self.assertEqual(len(spike_train),len(true_spike_train))
        # Does threshold_detection give the correct times for the spikes?
        # (the true values above are only given to 4 decimal places)
        try:
            assert_array_almost_equal(spike_train, true_spike_train,
                                      decimal=4)
        except AttributeError:  # If numpy version too old to have allclose
            self.assertTrue(np.array_equal(np.round(spike_train, 4),
                                           true_spike_train))
class AnalogSignalPeakDetectionTestCase(unittest.TestCase):
def setUp(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
data = iom2.read()
self.vm = data[0].segments[0].analogsignals[0]
self.true_time_stamps = [0.0124, 0.0354, 0.0713, 0.1192, 0.1695,
0.2201, 0.2711] * second
def test_peak_detection_time_stamps(self):
# Test with default arguments
result = stgen.peak_detection(self.vm)
self.assertEqual(len(self.true_time_stamps), len(result))
self.assertIsInstance(result, neo.core.SpikeTrain)
try:
assert_array_almost_equal(result, self.true_time_stamps)
except AttributeError:
self.assertTrue(np.array_equal(result, self.true_time_stamps))
def test_peak_detection_threshold(self):
# Test for empty SpikeTrain when threshold is too high
result = stgen.peak_detection(self.vm, threshold=30 * mV)
self.assertEqual(len(result), 0)
class AnalogSignalSpikeExtractionTestCase(unittest.TestCase):
def setUp(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
data = iom2.read()
self.vm = data[0].segments[0].analogsignals[0]
self.first_spike = np.array([-0.04084546, -0.03892033, -0.03664779,
-0.03392689, -0.03061474, -0.02650277,
-0.0212756, -0.01443531, -0.00515365,
0.00803962, 0.02797951, -0.07,
-0.06974495, -0.06950466, -0.06927778,
-0.06906314, -0.06885969, -0.06866651,
-0.06848277, -0.06830773, -0.06814071,
-0.06798113, -0.06782843, -0.06768213,
-0.06754178, -0.06740699, -0.06727737,
-0.06715259, -0.06703235, -0.06691635])
def test_spike_extraction_waveform(self):
spike_train = stgen.spike_extraction(self.vm.reshape(-1),
extr_interval = (-1*ms, 2*ms))
try:
assert_array_almost_equal(spike_train.waveforms[0][0].magnitude.reshape(-1),
self.first_spike)
except AttributeError:
self.assertTrue(
np.array_equal(spike_train.waveforms[0][0].magnitude,
self.first_spike))
class HomogeneousPoissonProcessTestCase(unittest.TestCase):
def setUp(self):
pass
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
np.random.seed(seed=12345)
for rate in [123.0*Hz, 0.123*kHz]:
for t_stop in [2345*ms, 2.345*second]:
spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
intervals = isi(spiketrain)
expected_spike_count = int((rate * t_stop).simplified)
self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.2) # should fail about 1 time in 1000
expected_mean_isi = (1/rate)
self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.2)
expected_first_spike = 0*ms
self.assertLess(spiketrain[0] - expected_first_spike, 7*expected_mean_isi)
expected_last_spike = t_stop
self.assertLess(expected_last_spike - spiketrain[-1], 7*expected_mean_isi)
# Kolmogorov-Smirnov test
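                # The ISIs of a homogeneous Poisson process are exponentially
                # distributed; compare the empirical ISI distribution against
                # that expectation.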
D, p = kstest(intervals.rescale(t_stop.units),
"expon",
args=(0, expected_mean_isi.rescale(t_stop.units)), # args are (loc, scale)
alternative='two-sided')
self.assertGreater(p, 0.001)
self.assertLess(D, 0.12)
def test_low_rates(self):
spiketrain = stgen.homogeneous_poisson_process(0*Hz, t_stop=1000*ms)
self.assertEqual(spiketrain.size, 0)
# not really a test, just making sure that all code paths are covered
for i in range(10):
spiketrain = stgen.homogeneous_poisson_process(1*Hz, t_stop=1000*ms)
def test_buffer_overrun(self):
np.random.seed(6085) # this seed should produce a buffer overrun
t_stop=1000*ms
rate = 10*Hz
spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
expected_last_spike = t_stop
expected_mean_isi = (1/rate).rescale(ms)
self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
class HomogeneousGammaProcessTestCase(unittest.TestCase):
def setUp(self):
pass
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
np.random.seed(seed=12345)
a = 3.0
for b in (67.0*Hz, 0.067*kHz):
for t_stop in (2345*ms, 2.345*second):
spiketrain = stgen.homogeneous_gamma_process(a, b, t_stop=t_stop)
intervals = isi(spiketrain)
expected_spike_count = int((b/a * t_stop).simplified)
self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.25) # should fail about 1 time in 1000
expected_mean_isi = (a/b).rescale(ms)
self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.3)
expected_first_spike = 0*ms
self.assertLess(spiketrain[0] - expected_first_spike, 4*expected_mean_isi)
expected_last_spike = t_stop
self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
# Kolmogorov-Smirnov test
D, p = kstest(intervals.rescale(t_stop.units),
"gamma",
args=(a, 0, (1/b).rescale(t_stop.units)), # args are (a, loc, scale)
alternative='two-sided')
self.assertGreater(p, 0.001)
self.assertLess(D, 0.25)
class _n_poisson_TestCase(unittest.TestCase):
def setUp(self):
self.n = 4
self.rate = 10*Hz
self.rates = range(1, self.n + 1)*Hz
self.t_stop = 10000*ms
def test_poisson(self):
# Check the output types for input rate + n number of neurons
pp = stgen._n_poisson(rate=self.rate, t_stop=self.t_stop, n=self.n)
self.assertIsInstance(pp, list)
self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
self.assertEqual(pp[0].simplified.units, 1000*ms)
self.assertEqual(len(pp), self.n)
# Check the output types for input list of rates
pp = stgen._n_poisson(rate=self.rates, t_stop=self.t_stop)
self.assertIsInstance(pp, list)
self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
self.assertEqual(pp[0].simplified.units, 1000*ms)
self.assertEqual(len(pp), self.n)
def test_poisson_error(self):
# Dimensionless rate
self.assertRaises(
ValueError, stgen._n_poisson, rate=5, t_stop=self.t_stop)
# Negative rate
self.assertRaises(
ValueError, stgen._n_poisson, rate=-5*Hz, t_stop=self.t_stop)
# Negative value when rate is a list
self.assertRaises(
ValueError, stgen._n_poisson, rate=[-5, 3]*Hz, t_stop=self.t_stop)
# Negative n
self.assertRaises(
ValueError, stgen._n_poisson, rate=self.rate, t_stop=self.t_stop,
n=-1)
# t_start>t_stop
self.assertRaises(
ValueError, stgen._n_poisson, rate=self.rate, t_start=4*ms,
t_stop=3*ms, n=3)
class singleinteractionprocess_TestCase(unittest.TestCase):
def setUp(self):
self.n = 4
self.rate = 10*Hz
self.rates = range(1, self.n + 1)*Hz
self.t_stop = 10000*ms
self.rate_c = 1*Hz
def test_sip(self):
# Generate an example SIP mode
sip, coinc = stgen.single_interaction_process(
n=self.n, t_stop=self.t_stop, rate=self.rate,
rate_c=self.rate_c, return_coinc=True)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000*ms)
self.assertEqual(coinc[0].simplified.units, 1000*ms)
# Check the output length
self.assertEqual(len(sip), self.n)
self.assertEqual(
len(coinc[0]), (self.rate_c*self.t_stop).rescale(dimensionless))
        # Generate an example SIP, giving a list of rates as input
sip, coinc = stgen.single_interaction_process(
t_stop=self.t_stop, rate=self.rates,
rate_c=self.rate_c, return_coinc=True)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000*ms)
self.assertEqual(coinc[0].simplified.units, 1000*ms)
# Check the output length
self.assertEqual(len(sip), self.n)
self.assertEqual(
len(coinc[0]), (self.rate_c*self.t_stop).rescale(dimensionless))
        # Generate an example SIP with a stochastic number of coincidences
sip = stgen.single_interaction_process(
n=self.n, t_stop=self.t_stop, rate=self.rate,
rate_c=self.rate_c, coincidences='stochastic', return_coinc=False)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000*ms)
def test_sip_error(self):
# Negative rate
self.assertRaises(
ValueError, stgen.single_interaction_process, n=self.n, rate=-5*Hz,
rate_c=self.rate_c, t_stop=self.t_stop)
# Negative coincidence rate
self.assertRaises(
ValueError, stgen.single_interaction_process, n=self.n,
rate=self.rate, rate_c=-3*Hz, t_stop=self.t_stop)
# Negative value when rate is a list
self.assertRaises(
ValueError, stgen.single_interaction_process, n=self.n,
rate=[-5, 3, 4, 2]*Hz, rate_c=self.rate_c, t_stop=self.t_stop)
# Negative n
self.assertRaises(
ValueError, stgen.single_interaction_process, n=-1,
rate=self.rate, rate_c=self.rate_c, t_stop=self.t_stop)
# Rate_c < rate
self.assertRaises(
ValueError, stgen.single_interaction_process, n=self.n,
rate=self.rate, rate_c=self.rate + 1*Hz, t_stop=self.t_stop)
class cppTestCase(unittest.TestCase):
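    # stgen.cpp draws correlated spike trains from a compound Poisson process;
    # A is the amplitude distribution, where A[j] is the probability that a
    # synchronous event is copied into exactly j of the output trains.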
def test_cpp_hom(self):
# testing output with generic inputs
A = [0, .9, .1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = 3 * Hz
cpp_hom = stgen.cpp(rate, A, t_stop, t_start=t_start)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_hom], [neo.SpikeTrain]*len(cpp_hom))
self.assertEqual(cpp_hom[0].simplified.units, 1000 * ms)
self.assertEqual(type(cpp_hom), list)
# testing quantities format of the output
self.assertEqual(
[train.simplified.units for train in cpp_hom], [1000 * ms]*len(
cpp_hom))
# testing output t_start t_stop
for st in cpp_hom:
self.assertEqual(st.t_stop, t_stop)
self.assertEqual(st.t_start, t_start)
self.assertEqual(len(cpp_hom), len(A) - 1)
# testing the units
A = [0, 0.9, 0.1]
t_stop = 10000*ms
t_start = 5 * 1000 * ms
rate = 3 * Hz
cpp_unit = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(cpp_unit[0].units, t_stop.units)
self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
# testing output without copy of spikes
A = [1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = 3 * Hz
cpp_hom_empty = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_hom_empty], [0]*len(cpp_hom_empty))
# testing output with rate equal to 0
A = [0, .9, .1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = 0 * Hz
cpp_hom_empty_r = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_hom_empty_r], [0]*len(
cpp_hom_empty_r))
# testing output with same spike trains in output
A = [0, 0, 1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = 3 * Hz
cpp_hom_eq = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertTrue(
np.allclose(cpp_hom_eq[0].magnitude, cpp_hom_eq[1].magnitude))
def test_cpp_hom_errors(self):
# testing raises of ValueError (wrong inputs)
# testing empty amplitude
self.assertRaises(
ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=3*Hz)
# testing sum of amplitude>1
self.assertRaises(
ValueError, stgen.cpp, A=[1, 1, 1], t_stop=10*1000 * ms, rate=3*Hz)
# testing negative value in the amplitude
self.assertRaises(
ValueError, stgen.cpp, A=[-1, 1, 1], t_stop=10*1000 * ms,
rate=3*Hz)
# test negative rate
self.assertRaises(
AssertionError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=-3*Hz)
# test wrong unit for rate
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=3*1000 * ms)
# testing raises of AttributeError (missing input units)
# Testing missing unit to t_stop
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10, rate=3*Hz)
# Testing missing unit to t_start
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms, rate=3*Hz,
t_start=3)
# testing rate missing unit
self.assertRaises(
AttributeError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=3)
def test_cpp_het(self):
# testing output with generic inputs
A = [0, .9, .1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = [3, 4] * Hz
cpp_het = stgen.cpp(rate, A, t_stop, t_start=t_start)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_het], [neo.SpikeTrain]*len(cpp_het))
self.assertEqual(cpp_het[0].simplified.units, 1000 * ms)
self.assertEqual(type(cpp_het), list)
# testing units
self.assertEqual(
[train.simplified.units for train in cpp_het], [1000 * ms]*len(
cpp_het))
# testing output t_start and t_stop
for st in cpp_het:
self.assertEqual(st.t_stop, t_stop)
self.assertEqual(st.t_start, t_start)
# testing the number of output spiketrains
self.assertEqual(len(cpp_het), len(A) - 1)
self.assertEqual(len(cpp_het), len(rate))
# testing the units
A = [0, 0.9, 0.1]
t_stop = 10000*ms
t_start = 5 * 1000 * ms
rate = [3, 4] * Hz
cpp_unit = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(cpp_unit[0].units, t_stop.units)
self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
# testing without copying any spikes
A = [1, 0, 0]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = [3, 4] * Hz
cpp_het_empty = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(len(cpp_het_empty[0]), 0)
# testing output with rate equal to 0
A = [0, .9, .1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = [0, 0] * Hz
cpp_het_empty_r = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_het_empty_r], [0]*len(
cpp_het_empty_r))
# testing completely sync spiketrains
A = [0, 0, 1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = [3, 3] * Hz
cpp_het_eq = stgen.cpp(rate, A, t_stop, t_start=t_start)
self.assertTrue(np.allclose(
cpp_het_eq[0].magnitude, cpp_het_eq[1].magnitude))
def test_cpp_het_err(self):
# testing raises of ValueError (wrong inputs)
# testing empty amplitude
self.assertRaises(
ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
# testing sum amplitude>1
self.assertRaises(
ValueError, stgen.cpp, A=[1, 1, 1], t_stop=10*1000 * ms,
rate=[3, 4]*Hz)
# testing amplitude negative value
self.assertRaises(
ValueError, stgen.cpp, A=[-1, 1, 1], t_stop=10*1000 * ms,
rate=[3, 4]*Hz)
# testing negative rate
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=[-3, 4]*Hz)
# testing empty rate
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms, rate=[]*Hz)
# testing empty amplitude
self.assertRaises(
ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
# testing different len(A)-1 and len(rate)
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
# testing rate with different unit from Hz
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1], t_stop=10*1000 * ms,
rate=[3, 4]*1000 * ms)
        # Testing the analytical constraint between amplitude and rate
self.assertRaises(
ValueError, stgen.cpp, A=[0, 0, 1], t_stop=10*1000 * ms,
rate=[3, 4]*Hz, t_start=3)
# testing raises of AttributeError (missing input units)
# Testing missing unit to t_stop
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10, rate=[3, 4]*Hz)
# Testing missing unit to t_start
self.assertRaises(
ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=[3, 4]*Hz, t_start=3)
# Testing missing unit to rate
self.assertRaises(
AttributeError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
rate=[3, 4])
    def test_cpp_jittered(self):
# testing output with generic inputs
A = [0, .9, .1]
t_stop = 10 * 1000 * ms
t_start = 5 * 1000 * ms
rate = 3 * Hz
cpp_shift = stgen.cpp(
rate, A, t_stop, t_start=t_start, shift=3*ms)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_shift], [neo.SpikeTrain]*len(
cpp_shift))
self.assertEqual(cpp_shift[0].simplified.units, 1000 * ms)
self.assertEqual(type(cpp_shift), list)
# testing quantities format of the output
self.assertEqual(
[train.simplified.units for train in cpp_shift],
[1000 * ms]*len(cpp_shift))
# testing output t_start t_stop
for st in cpp_shift:
self.assertEqual(st.t_stop, t_stop)
self.assertEqual(st.t_start, t_start)
self.assertEqual(len(cpp_shift), len(A) - 1)
if __name__ == '__main__':
unittest.main()
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the cbuildbot script."""
from __future__ import print_function
import argparse
import glob
import optparse # pylint: disable=deprecated-module
import os
import sys
import pytest # pylint: disable=import-error
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import cgroups
from chromite.cbuildbot import commands
from chromite.lib import config_lib_unittest
from chromite.lib import constants
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot.builders import simple_builders
from chromite.lib import cidb
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import partial_mock
from chromite.lib import sudo
from chromite.lib.buildstore import FakeBuildStore
from chromite.scripts import cbuildbot
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class BuilderRunMock(partial_mock.PartialMock):
"""Partial mock for BuilderRun class."""
TARGET = 'chromite.cbuildbot.cbuildbot_run._BuilderRunBase'
ATTRS = ('GetVersionInfo', 'DetermineChromeVersion',)
def __init__(self, verinfo):
super(BuilderRunMock, self).__init__()
self._version_info = verinfo
def GetVersionInfo(self, _inst):
"""This way builders don't have to set the version from the overlay"""
return self._version_info
def DetermineChromeVersion(self, _inst):
"""Normaly this runs a portage command to look at the chrome ebuild"""
return self._version_info.chrome_branch
class SimpleBuilderTestCase(cros_test_lib.MockTestCase):
"""Common stubs for SimpleBuilder tests."""
CHROME_BRANCH = '27'
VERSION = '1234.5.6'
def setUp(self):
verinfo = manifest_version.VersionInfo(
version_string=self.VERSION, chrome_branch=self.CHROME_BRANCH)
self.StartPatcher(BuilderRunMock(verinfo))
self.PatchObject(simple_builders.SimpleBuilder, 'GetVersionInfo',
return_value=verinfo)
class TestArgsparseError(Exception):
"""Exception used by parser.error() mock to halt execution."""
class TestHaltedException(Exception):
"""Exception used by mocks to halt execution without indicating failure."""
class RunBuildStagesTest(cros_test_lib.RunCommandTempDirTestCase,
SimpleBuilderTestCase):
"""Test that cbuildbot runs the appropriate stages for a given config."""
def setUp(self):
self.buildroot = os.path.join(self.tempdir, 'buildroot')
osutils.SafeMakedirs(self.buildroot)
    # Always stub RunCommand out as we use it in every method.
self.site_config = config_lib_unittest.MockSiteConfig()
self.build_config = config_lib_unittest.MockBuildConfig()
self.bot_id = self.build_config.name
self.build_config['master'] = False
self.build_config['important'] = False
self.buildstore = FakeBuildStore()
# Use the cbuildbot parser to create properties and populate default values.
self.parser = cbuildbot._CreateParser()
argv = ['-r', self.buildroot, '--buildbot', '--debug', self.bot_id]
self.options = cbuildbot.ParseCommandLine(self.parser, argv)
self.options.bootstrap = False
self.options.clean = False
self.options.resume = False
self.options.sync = False
self.options.build = False
self.options.uprev = False
self.options.tests = False
self.options.archive = False
self.options.remote_test_status = False
self.options.patches = None
self.options.prebuilts = False
self._manager = parallel.Manager()
# Pylint-1.9 has a false positive on this for some reason.
self._manager.__enter__() # pylint: disable=no-value-for-parameter
self.run = cbuildbot_run.BuilderRun(self.options, self.site_config,
self.build_config, self._manager)
self.rc.AddCmdResult(
[constants.PATH_TO_CBUILDBOT, '--reexec-api-version'],
output=constants.REEXEC_API_VERSION)
def tearDown(self):
# Mimic exiting a 'with' statement.
if hasattr(self, '_manager'):
self._manager.__exit__(None, None, None)
def testChromeosOfficialSet(self):
"""Verify that CHROMEOS_OFFICIAL is set correctly."""
self.build_config['chromeos_official'] = True
cidb.CIDBConnectionFactory.SetupNoCidb()
# Clean up before.
os.environ.pop('CHROMEOS_OFFICIAL', None)
simple_builders.SimpleBuilder(self.run, self.buildstore).Run()
self.assertIn('CHROMEOS_OFFICIAL', os.environ)
def testChromeosOfficialNotSet(self):
"""Verify that CHROMEOS_OFFICIAL is not always set."""
self.build_config['chromeos_official'] = False
cidb.CIDBConnectionFactory.SetupNoCidb()
# Clean up before.
os.environ.pop('CHROMEOS_OFFICIAL', None)
simple_builders.SimpleBuilder(self.run, self.buildstore).Run()
self.assertNotIn('CHROMEOS_OFFICIAL', os.environ)
class LogTest(cros_test_lib.TempDirTestCase):
"""Test logging functionality."""
def _generateLogs(self, num):
"""Generates cbuildbot.log and num backups."""
with open(os.path.join(self.tempdir, 'cbuildbot.log'), 'w') as f:
f.write(str(num + 1))
for i in range(1, num + 1):
with open(os.path.join(self.tempdir, 'cbuildbot.log.' + str(i)),
'w') as f:
f.write(str(i))
def testZeroToOneLogs(self):
"""Test beginning corner case."""
self._generateLogs(0)
cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
backup_limit=25)
with open(os.path.join(self.tempdir, 'cbuildbot.log.1')) as f:
self.assertEqual(f.readline(), '1')
def testNineToTenLogs(self):
"""Test handling *.log.9 to *.log.10 (correct sorting)."""
self._generateLogs(9)
cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
backup_limit=25)
with open(os.path.join(self.tempdir, 'cbuildbot.log.10')) as f:
self.assertEqual(f.readline(), '10')
def testOverLimit(self):
"""Test going over the limit and having to purge old logs."""
self._generateLogs(25)
cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
backup_limit=25)
with open(os.path.join(self.tempdir, 'cbuildbot.log.26')) as f:
self.assertEqual(f.readline(), '26')
self.assertEqual(len(glob.glob(os.path.join(self.tempdir, 'cbuildbot*'))),
25)
class InterfaceTest(cros_test_lib.MockTestCase, cros_test_lib.LoggingTestCase):
"""Test the command line interface."""
_GENERIC_PREFLIGHT = 'amd64-generic-paladin'
_BUILD_ROOT = '/b/test_build1'
def setUp(self):
self.parser = cbuildbot._CreateParser()
self.site_config = config_lib_unittest.MockSiteConfig()
def assertDieSysExit(self, *args, **kwargs):
self.assertRaises(cros_build_lib.DieSystemExit, *args, **kwargs)
def testDepotTools(self):
"""Test that the entry point used by depot_tools works."""
path = os.path.join(constants.SOURCE_ROOT, 'chromite', 'bin', 'cbuildbot')
# Verify the tests below actually are testing correct behaviour;
# specifically that it doesn't always just return 0.
self.assertRaises(cros_build_lib.RunCommandError,
cros_build_lib.run,
['cbuildbot', '--monkeys'], cwd=constants.SOURCE_ROOT)
# Validate depot_tools lookup.
cros_build_lib.run(
['cbuildbot', '--help'], cwd=constants.SOURCE_ROOT, capture_output=True)
# Validate buildbot invocation pathway.
cros_build_lib.run(
[path, '--help'], cwd=constants.SOURCE_ROOT, capture_output=True)
def testBuildBotOption(self):
"""Test that --buildbot option unsets debug flag."""
args = ['-r', self._BUILD_ROOT, '--buildbot', self._GENERIC_PREFLIGHT]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertFalse(options.debug)
self.assertTrue(options.buildbot)
def testBuildBotWithDebugOption(self):
"""Test that --debug option overrides --buildbot option."""
args = ['-r', self._BUILD_ROOT, '--buildbot', '--debug',
self._GENERIC_PREFLIGHT]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertTrue(options.debug)
self.assertTrue(options.buildbot)
def testBuildBotWithRemotePatches(self):
"""Test that --buildbot errors out with patches."""
args = ['-r', self._BUILD_ROOT, '--buildbot', '-g', '1234',
self._GENERIC_PREFLIGHT]
self.assertDieSysExit(cbuildbot.ParseCommandLine, self.parser, args)
def testBuildbotDebugWithPatches(self):
"""Test we can test patches with --buildbot --debug."""
args = ['-r', self._BUILD_ROOT, '--buildbot', '--debug', '-g', '1234',
self._GENERIC_PREFLIGHT]
cbuildbot.ParseCommandLine(self.parser, args)
def testBuildBotWithoutProfileOption(self):
"""Test that no --profile option gets defaulted."""
args = ['-r', self._BUILD_ROOT, '--buildbot', self._GENERIC_PREFLIGHT]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertEqual(options.profile, None)
def testBuildBotWithProfileOption(self):
"""Test that --profile option gets parsed."""
args = ['-r', self._BUILD_ROOT, '--buildbot',
'--profile', 'carp', self._GENERIC_PREFLIGHT]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertEqual(options.profile, 'carp')
def testValidateClobberUserDeclines_1(self):
"""Test case where user declines in prompt."""
self.PatchObject(os.path, 'exists', return_value=True)
self.PatchObject(cros_build_lib, 'GetInput', return_value='No')
self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
def testValidateClobberUserDeclines_2(self):
"""Test case where user does not enter the full 'yes' pattern."""
self.PatchObject(os.path, 'exists', return_value=True)
m = self.PatchObject(cros_build_lib, 'GetInput', side_effect=['asdf', 'No'])
self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
self.assertEqual(m.call_count, 2)
def testValidateClobberProtectRunningChromite(self):
"""User should not be clobbering our own source."""
cwd = os.path.dirname(os.path.realpath(__file__))
buildroot = os.path.dirname(cwd)
self.assertDieSysExit(commands.ValidateClobber, buildroot)
def testValidateClobberProtectRoot(self):
"""User should not be clobbering /"""
self.assertDieSysExit(commands.ValidateClobber, '/')
def testBuildBotWithBadChromeRevOption(self):
"""chrome_rev can't be passed an invalid option after chrome_root."""
args = [
'--local',
'--buildroot=/tmp',
'--chrome_root=.',
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
self._GENERIC_PREFLIGHT,
]
self.assertDieSysExit(cbuildbot.ParseCommandLine, self.parser, args)
def testBuildBotWithBadChromeRootOption(self):
"""chrome_root can't get passed after non-local chrome_rev."""
args = [
'--buildbot',
'--buildroot=/tmp',
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
'--chrome_root=.',
self._GENERIC_PREFLIGHT,
]
self.assertDieSysExit(cbuildbot.ParseCommandLine, self.parser, args)
def testBuildBotWithBadChromeRevOptionLocal(self):
"""chrome_rev can't be local without chrome_root."""
args = [
'--buildbot',
'--buildroot=/tmp',
'--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
self._GENERIC_PREFLIGHT,
]
self.assertDieSysExit(cbuildbot.ParseCommandLine, self.parser, args)
def testBuildBotWithGoodChromeRootOption(self):
"""chrome_root can be set without chrome_rev."""
args = [
'--buildbot',
'--buildroot=/tmp',
'--chrome_root=.',
self._GENERIC_PREFLIGHT,
]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertEqual(options.chrome_rev, constants.CHROME_REV_LOCAL)
self.assertNotEqual(options.chrome_root, None)
def testBuildBotWithGoodChromeRevAndRootOption(self):
"""chrome_rev can get reset around chrome_root."""
args = [
'--buildbot',
'--buildroot=/tmp',
'--chrome_rev=%s' % constants.CHROME_REV_LATEST,
'--chrome_rev=%s' % constants.CHROME_REV_STICKY,
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
'--chrome_rev=%s' % constants.CHROME_REV_STICKY,
'--chrome_rev=%s' % constants.CHROME_REV_LATEST,
'--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
'--chrome_root=.',
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
'--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
self._GENERIC_PREFLIGHT,
]
options = cbuildbot.ParseCommandLine(self.parser, args)
self.assertEqual(options.chrome_rev, constants.CHROME_REV_LOCAL)
self.assertNotEqual(options.chrome_root, None)
@pytest.mark.usefixtures('singleton_manager')
class FullInterfaceTest(cros_test_lib.MockTempDirTestCase):
"""Tests that run the cbuildbot.main() function directly.
Note this explicitly suppresses automatic VerifyAll() calls; thus if you want
that checked, you have to invoke it yourself.
"""
def MakeTestRootDir(self, relpath):
abspath = os.path.join(self.root, relpath)
osutils.SafeMakedirs(abspath)
return abspath
def setUp(self):
self.root = self.tempdir
self.buildroot = self.MakeTestRootDir('build_root')
self.sourceroot = self.MakeTestRootDir('source_root')
osutils.SafeMakedirs(os.path.join(self.sourceroot, '.repo', 'manifests'))
osutils.SafeMakedirs(os.path.join(self.sourceroot, '.repo', 'repo'))
# Stub out all relevant methods regardless of whether they are called in the
# specific test case.
self.PatchObject(optparse.OptionParser, 'error',
side_effect=TestArgsparseError())
self.PatchObject(argparse.ArgumentParser, 'error',
side_effect=TestArgsparseError())
self.inchroot_mock = self.PatchObject(cros_build_lib, 'IsInsideChroot',
return_value=False)
self.input_mock = self.PatchObject(cros_build_lib, 'GetInput',
side_effect=Exception())
self.PatchObject(cbuildbot, '_RunBuildStagesWrapper', return_value=True)
    # Suppress cgroups code. For cbuildbot invocation, it doesn't hugely
    # care about cgroups; that's a black box to it. As such, these unittests
    # should not be sensitive to it.
self.PatchObject(cgroups.Cgroup, 'IsSupported',
return_value=True)
self.PatchObject(cgroups, 'SimpleContainChildren')
self.PatchObject(sudo.SudoKeepAlive, '_IdentifyTTY', return_value='unknown')
def assertMain(self, args, common_options=True):
if common_options:
args.extend(['--sourceroot', self.sourceroot, '--notee'])
try:
return cbuildbot.main(args)
finally:
cros_build_lib.STRICT_SUDO = False
def testNullArgsStripped(self):
"""Test that null args are stripped out and don't cause error."""
self.assertMain(['-r', self.buildroot, '', '',
'amd64-generic-full-tryjob'])
def testMultipleConfigsError(self):
"""Test that multiple configs cause error."""
with self.assertRaises(cros_build_lib.DieSystemExit):
self.assertMain(['-r', self.buildroot,
'arm-generic-full-tryjob',
'amd64-generic-full-tryjob'])
def testBuildbotDiesInChroot(self):
"""Buildbot should quit if run inside a chroot."""
self.inchroot_mock.return_value = True
with self.assertRaises(cros_build_lib.DieSystemExit):
self.assertMain(['--debug', '-r', self.buildroot,
'amd64-generic-full-tryjob'])
def testBuildBotOnNonCIBuilder(self):
"""Test BuildBot On Non-CIBuilder
Buildbot should quite if run in a non-CIBuilder without
both debug and remote.
"""
if not cros_build_lib.HostIsCIBuilder():
with self.assertRaises(cros_build_lib.DieSystemExit):
self.assertMain(['--buildbot', 'amd64-generic-full'])
| |
#!/usr/bin/env python
"""
Calculate output results dictionary.
"""
from collections import OrderedDict as odict
import numpy as np
import yaml
import numpy.lib.recfunctions as recfuncs
import astropy.coordinates
from astropy.coordinates import SkyCoord
import astropy.units as u
import ugali.analysis.source
import ugali.analysis.loglike
import ugali.utils.stats
from ugali.utils.stats import Samples
from ugali.utils.projector import dist2mod,mod2dist
from ugali.utils.projector import cel2gal,gal2cel,gal2cel_angle
from ugali.utils.projector import ang2const, ang2iau
from ugali.utils.config import Config
from ugali.utils.logger import logger
_alpha = 0.32
class Results(object):
"""
Calculate results from a MCMC chain.
"""
def __init__(self, config, loglike, samples=None):
self.config = Config(config)
self.alpha = self.config['results'].get('alpha',_alpha)
self.nwalkers = self.config['mcmc'].get('nwalkers',100)
self.nburn = self.config['results'].get('nburn',10)
self.coordsys = self.config['coords']['coordsys'].lower()
self.loglike = loglike
self.source = self.loglike.source
self.params = list(self.source.get_free_params().keys())
self.samples = samples
def load_samples(self,filename):
samples = Samples(filename)
self.samples = samples.supplement(coordsys=self.coordsys)
def get_mle(self):
mle = self.source.get_params()
# FIXME: For composite isochrones
if 'age' not in mle:
mle['age'] = np.average(self.source.isochrone.age)
if 'metallicity' not in mle:
mle['metallicity'] = np.average(self.source.isochrone.metallicity)
return mle
def estimate(self,param,burn=None,clip=10.0,alpha=_alpha):
""" Estimate parameter value and uncertainties
Parameters
----------
param : parameter of interest
burn : number of burn in samples
clip : sigma clipping
alpha : confidence interval
Returns
-------
[mle, [lo,hi]] : value and interval
"""
# FIXME: Need to add age and metallicity to composite isochrone params (currently properties)
if alpha is None: alpha = self.alpha
if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']:
msg = 'Unrecognized parameter: %s'%param
raise KeyError(msg)
# If the parameter is in the samples
if param in self.samples.names:
if param.startswith('position_angle'):
return self.estimate_position_angle(param,burn=burn,
clip=clip,alpha=alpha)
return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha)
mle = self.get_mle()
errors = [np.nan,np.nan]
# Set default value to the MLE value
if param in self.source.params:
err = self.source.params[param].errors
if err is not None: errors = err
# For age and metallicity from composite isochrone
return [float(mle[param]),errors]
def estimate_params(self,burn=None,clip=10.0,alpha=None):
""" Estimate all source parameters """
if alpha is None: alpha = self.alpha
mle = self.get_mle()
out = odict()
for param in mle.keys():
out[param] = self.estimate(param,burn=burn,clip=clip,alpha=alpha)
return out
def estimate_position_angle(self,param='position_angle',burn=None,clip=10.0,alpha=None):
""" Estimate the position angle from the posterior dealing
with periodicity.
"""
if alpha is None: alpha = self.alpha
# Transform so peak in the middle of the distribution
pa = self.samples.get(param,burn=burn,clip=clip)
peak = ugali.utils.stats.kde_peak(pa)
shift = 180.*((pa+90-peak)>180)
pa -= shift
# Get the kde interval
ret = ugali.utils.stats.peak_interval(pa,alpha)
if ret[0] < 0:
ret[0] += 180.; ret[1][0] += 180.; ret[1][1] += 180.;
return ret
def bayes_factor(self,param,burn=None,clip=10.0,bins=50):
# CAREFUL: Assumes a flat prior...
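        # In effect this is a Savage-Dickey-style density ratio evaluated at
        # the lower bound of the parameter: the flat prior density
        # 1/(bmax - bmin) is divided by the posterior density in the first
        # histogram bin, so bf >> 1 means the posterior puts little weight
        # on the null value (e.g. ellipticity = 0).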
try:
data = self.samples.get(param,burn=burn,clip=clip)
except ValueError as msg:
logger.warning(msg)
return ugali.utils.stats.interval(np.nan)
bmin,bmax = self.source.params[param].bounds
bins = np.linspace(bmin,bmax,bins)
        n,b = np.histogram(data,bins=bins,density=True)
prior = 1.0/(bmax-bmin)
posterior = n[0]
# Excluding the null hypothesis
bf = prior/posterior
return ugali.utils.stats.interval(bf)
def get_results(self,**kwargs):
kwargs.setdefault('alpha',self.alpha)
kwargs.setdefault('burn',self.nburn*self.nwalkers)
# Calculate best-fit parameters from MCMC chain
logger.debug('Estimating parameters...')
estimate = self.estimate_params(**kwargs)
params = {k:v[0] for k,v in estimate.items()}
results = dict(estimate)
# Extra parameters from the MCMC chain
logger.debug('Estimating auxiliary parameters...')
logger.debug("alpha = %.2f"%kwargs['alpha'])
results['alpha'] = kwargs['alpha']
try:
results['ra'] = self.estimate('ra',**kwargs)
results['dec'] = self.estimate('dec',**kwargs)
results['glon'] = self.estimate('glon',**kwargs)
results['glat'] = self.estimate('glat',**kwargs)
except KeyError:
logger.warn("Didn't find 'ra' or 'dec' in Samples...")
if self.coordsys == 'gal':
results['glon'] = results['lon']
results['glat'] = results['lat']
ra,dec = gal2cel(results['lon'][0],results['lat'][0])
results['ra'] = ugali.utils.stats.interval(ra)
results['dec'] = ugali.utils.stats.interval(dec)
else:
results['ra'] = results['lon']
results['dec'] = results['lat']
glon,glat = cel2gal(results['lon'][0],results['lat'][0])
results['glon'] = ugali.utils.stats.interval(glon)
results['glat'] = ugali.utils.stats.interval(glat)
lon,lat = results['lon'][0],results['lat'][0]
ra,dec = results['ra'][0],results['dec'][0]
glon,glat = results['glon'][0],results['glat'][0]
results.update(gal=[float(glon),float(glat)])
results.update(cel=[float(ra),float(dec)])
try:
results['position_angle_cel'] = self.estimate('position_angle_cel',**kwargs)
except KeyError:
results['position_angle_cel'] = ugali.utils.stats.interval(np.nan)
# Update the loglike to the best-fit parameters from the chain
logger.debug('Calculating TS...')
ts = 2*self.loglike.value(**params)
results['ts'] = ugali.utils.stats.interval(ts,np.nan,np.nan)
# Celestial position angle
# Break ambiguity in direction with '% 180.'
pa,pa_err = results['position_angle']
pa_cel = gal2cel_angle(lon,lat,pa) % 180.
pa_cel_err = np.array(pa_err) - pa + pa_cel
results['position_angle_cel'] = ugali.utils.stats.interval(pa_cel,pa_cel_err[0],pa_cel_err[1])
mod,mod_err = estimate['distance_modulus']
dist = mod2dist(mod)
dist_lo,dist_hi = [mod2dist(mod_err[0]),mod2dist(mod_err[1])]
results['distance'] = ugali.utils.stats.interval(dist,dist_lo,dist_hi)
dist,dist_err = results['distance']
ext,ext_err = estimate['extension']
ext_sigma = np.nan_to_num(np.array(ext_err) - ext)
results['extension_arcmin'] = ugali.utils.stats.interval(60*ext,60*ext_err[0],60*ext_err[1])
# Radially symmetric extension (correct for ellipticity).
ell,ell_err = estimate['ellipticity']
rext,rext_err = ext*np.sqrt(1-ell),np.array(ext_err)*np.sqrt(1-ell)
rext_sigma = np.nan_to_num(np.array(rext_err) - rext)
results['extension_radial'] = ugali.utils.stats.interval(rext,rext_err[0],rext_err[1])
results['extension_radial_arcmin'] = ugali.utils.stats.interval(60*rext,60*rext_err[0],60*rext_err[1])
# Bayes factor for ellipticity
results['ellipticity_bayes_factor'] = self.bayes_factor('ellipticity',burn=kwargs['burn'])
# Physical Size (should do this with the posteriors)
# Radially symmetric
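        # NOTE: np.arctan(np.radians(ext)) ~ np.radians(ext) for the small
        # extensions involved here, so this is effectively size = d * theta
        # (theta in radians); the arctan form only matters at large angles.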
dist_sigma = np.nan_to_num(np.array(dist_err) - dist)
size = np.arctan(np.radians(ext)) * dist
size_sigma = size * np.sqrt((ext_sigma/ext)**2 + (dist_sigma/dist)**2)
size_err = [size-size_sigma[0],size+size_sigma[1]]
results['physical_size'] = ugali.utils.stats.interval(size,size_err[0],size_err[1])
rsize = np.arctan(np.radians(rext)) * dist
rsize_sigma = rsize * np.sqrt((rext_sigma/rext)**2 + (dist_sigma/dist)**2)
rsize_err = [rsize-rsize_sigma[0],rsize+rsize_sigma[1]]
results['physical_size_radial'] = ugali.utils.stats.interval(rsize,rsize_err[0],rsize_err[1])
# Richness
rich,rich_err = estimate['richness']
# Number of observed stars (sum of p-values)
nobs = self.loglike.p.sum()
nobs_lo,nobs_hi = nobs + np.sqrt(nobs)*np.array([-1,1])
results['nobs'] = ugali.utils.stats.interval(nobs,nobs_lo,nobs_hi)
# Number of predicted stars (pixelization effects?)
npred = self.loglike.f*rich
npred_lo,npred_hi = rich_err[0]*self.loglike.f,rich_err[1]*self.loglike.f
results['npred'] = ugali.utils.stats.interval(npred,npred_lo,npred_hi)
# Careful, depends on the isochrone...
stellar_mass = self.source.stellar_mass()
mass = rich*stellar_mass
mass_lo,mass_hi = rich_err[0]*stellar_mass,rich_err[1]*stellar_mass
results['mass'] = ugali.utils.stats.interval(mass,mass_lo,mass_hi)
stellar_luminosity = self.source.stellar_luminosity()
lum = rich*stellar_luminosity
lum_lo,lum_hi = rich_err[0]*stellar_luminosity,rich_err[1]*stellar_luminosity
results['luminosity'] = ugali.utils.stats.interval(lum,lum_lo,lum_hi)
# Absolute magnitude only calculated for DES isochrones with g,r
try:
Mv = self.source.absolute_magnitude(rich)
Mv_lo = self.source.absolute_magnitude(rich_err[0])
Mv_hi = self.source.absolute_magnitude(rich_err[1])
results['Mv'] = ugali.utils.stats.interval(Mv,Mv_lo,Mv_hi)
except ValueError as e:
logger.warning("Skipping absolute magnitude")
logger.warn(str(e))
Mv = np.nan
results['Mv'] = Mv
mu = surfaceBrightness(Mv, rsize, dist) ##updated from size-->rsize
results['surface_brightness'] = ugali.utils.stats.interval(mu,np.nan,np.nan)
# ADW: WARNING this is very fragile.
# Also, this is not quite right, should cut on the CMD available space
kwargs = dict(richness=rich,mag_bright=16., mag_faint=23.,
n_trials=5000,alpha=kwargs['alpha'], seed=0)
martin = self.config['results'].get('martin')
if martin:
logger.info("Calculating Martin magnitude...")
if martin > 1: kwargs['n_trials'] = martin
Mv_martin = self.source.isochrone.absolute_magnitude_martin(**kwargs)
results['Mv_martin'] = Mv_martin
mu_martin = surfaceBrightness(Mv_martin, rsize, dist) ##updated from size-->rsize
results['surface_brightness_martin'] = ugali.utils.stats.interval(mu_martin,np.nan,np.nan)
else:
logger.warning("Skipping Martin magnitude")
results['Mv_martin'] = np.nan
results['surface_brightness_martin'] = np.nan
try:
results['constellation'] = ang2const(lon,lat,self.coordsys)[1]
        except Exception:
pass
results['iau'] = ugali.utils.projector.ang2iau(lon,lat,self.coordsys)
coord = SkyCoord(ra*u.deg,dec*u.deg,distance=dist*u.kpc)
results['ra_sex'] = str(coord.ra.to_string())
results['dec_sex'] = str(coord.dec.to_string())
# Calculate some separations from GC, LMC, SMC
        #NED coordinates with de Grijs distance
LMC = SkyCoord(80.8939*u.deg,-69.7561*u.deg,distance=49.89*u.kpc)
        #NED coordinates with de Grijs distance
SMC = SkyCoord(13.1866*u.deg,-72.8286*u.deg,distance=61.94*u.kpc)
# GC from astropy?
GC = SkyCoord(266.4168262*u.deg,-29.0077969*u.deg,distance=8.0*u.kpc)
results['d_gc'] = coord.separation_3d(GC).value
results['d_lmc'] = coord.separation_3d(LMC).value
results['d_smc'] = coord.separation_3d(SMC).value
try:
results['feh'] = float(self.source.isochrone.feh)
        except Exception:
results['feh'] = np.nan
output = dict()
output['params'] = params
output['results'] = results
return output
def write(self,filename):
if self.samples is not None:
results = dict(self.get_results())
params = dict(params=results.pop('params'))
else:
results = dict(results=dict())
params = dict(params=dict())
source = dict(source=self.source.todict())
        with open(filename,'w') as out:
            out.write(yaml.dump(params,default_flow_style=False))
            out.write(yaml.dump(results))
            out.write(yaml.dump(source))
def surfaceBrightness(abs_mag, r_physical, distance):
"""Compute the average surface brightness [mag/arcsec^2] within the
azimuthally averaged physical half-light radius.
The azimuthally averaged physical half-light radius is used
because the surface brightness is calculated as a function of
area. The area of an ellipse is:
A = pi * a*b = pi * a**2 * (1-e) = pi * r**2
The factor 2 in the c_v equation below accounts for the fact that
we are taking half the luminosity within the half-light
radius. The 3600.**2 is conversion from deg^2 to arcsec^2
c_v = 2.5 * np.log10(2.) + 2.5 * np.log10(np.pi * 3600.**2) = 19.78
TODO: Distance could be optional
Parameters
----------
abs_mag : absolute V-band magnitude [mag]
r_physical : azimuthally averaged physical half-light radius [kpc]
distance : heliocentric distance [kpc]
Returns
-------
mu : surface brightness [mag/arcsec^2]
"""
c_v = 19.78 # conversion to mag/arcsec^2
r_angle = np.degrees(np.arctan(r_physical / distance))
return abs_mag + dist2mod(distance) + c_v + 2.5 * np.log10(r_angle**2)
def surfaceBrightness2(app_mag, r_half_arcmin):
"""Compute the average surface brightness [mag/arcsec^2] within the
azimuthally averaged angular half-light radius.
The azimuthally averaged half-light radius is used
because the surface brightness is calculated as a function of
area. The area of an ellipse is:
A = pi * a*b = pi * a**2 * (1-e) = pi * r**2
The factor 2.5*log10(2) accounts for the fact that half the
luminosity is within the half-light radius.
Parameters
----------
app_mag : apparent V-band magnitude [mag]
    r_half_arcmin : azimuthally averaged angular half-light radius [arcmin]
Returns
-------
mu : surface brightness [mag/arcsec^2]
"""
r_half_arcsec = r_half_arcmin * 60 # arcmin to arcsec
mu = app_mag + 2.5*np.log10(2.) + 2.5*np.log10(np.pi * r_half_arcsec**2)
return mu
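# Consistency sketch (illustrative numbers, not from any catalog): for
# M_V = -6, r_physical = 0.05 kpc and d = 100 kpc the angular radius is
# degrees(arctan(0.05/100)) ~ 0.0286 deg ~ 1.72 arcmin, so
#   surfaceBrightness(-6., 0.05, 100.)
# agrees with
#   surfaceBrightness2(-6. + dist2mod(100.), 1.72)
# up to rounding, since both reduce to
#   mu = m_V + 2.5*log10(2) + 2.5*log10(pi * r_arcsec**2)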
def createResults(config,srcfile,section='source',samples=None):
""" Create an MCMC instance """
source = ugali.analysis.source.Source()
source.load(srcfile,section=section)
loglike = ugali.analysis.loglike.createLoglike(config,source)
results = Results(config,loglike,samples)
if samples is not None:
results.load_samples(samples)
return results
def write_results(filename,config,srcfile,samples):
""" Package everything nicely """
results = createResults(config,srcfile,samples=samples)
results.write(filename)
if __name__ == "__main__":
import ugali.utils.parser
parser = ugali.utils.parser.Parser(description=__doc__)
parser.add_config()
parser.add_verbose()
parser.add_argument('--srcmdl',required=True,
help='Source model file')
parser.add_argument('--section',default='source',
help='Section of source file')
parser.add_argument('--samples',required=True,
help='Posterior samples file')
parser.add_argument('outfile',default=None,
help="Output file name")
args = parser.parse_args()
#write_results(args.outfile,args.config,args.srcmdl,args.samples)
results = createResults(args.config,args.srcmdl,samples=args.samples)
results.write(args.outfile)
| |
#!/usr/bin/python
import random
import os
import subprocess as sub
import shlex
import multiprocessing as mp
import numpy as np
##########Modules specific to SR's environment
#import config as cfg
#import parallel
#THIS CODE WILL TAKE SOME EFFORT TO MODIFY FOR A NEW ENVIRONMENT#
#Notes created during development--no guarantee they are all relevant
#####use the X chromosome of males and females to assess FDR and FNR, particularly using my cutoffs
#DONE!!!##import reference information
#DONE#sample names, QC info, (ploidy?), genders
#DONE#cnv cutoffs
#DONE#determine which QC-passing samples have no putative CNVs on the X chromosome
#DONEat the same time, build up information about CNVs on the X chromosome of female cells
#DONEconsider both all putative CNVs, DONE and filtered CNVs
###FDR will be assessed by adding together the data of 2 male X chromosomes and seeing if CNVs are called (expect 0!)
#DONE#using samples where no CNVs called originally
#DONE#don't want to merge samples with extremely different QC metrics...(or do I???)
#DONEcalculate range-scaled distance matrix of QC values
#DONEfor each sample, find the 10 others that are most similar to it
#DONEfilter down the list to only contain unique combinations
###THIS GIVES 582 COMPARISONS
###Will this be enough for convincing comparisons? Might need to expand based on the output I get
#DONE#for each pair
#generate a new lowess file by adding together the X chrom lowess bin counts
#run CBS
#convert the segment data to linear form
###2 ** ratio * 2 should be fine here because there's only one chromosome and it was euploid to start...but I might want to confirm that
#look for the presence of any CNVs
#merge if needed
#assess if any CNVs would be removed by my filtering
#return the number of CNVs with and without filtering
#DONE#statistics to assess
#how many CNVs does filtering remove (both for females and simulated females)
#FDR
#FP rate can be the total number of CNVs from simFem runs / number of runs (bad calls per cell)
#DONEthen the TP+FP is the number of CNVs from realFems / number of realFems (calls made per cell)
#FDR is FP/(TP+FP)
#look at for both putative and filtered CNVs, and compare the difference between the two
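#e.g. if simulated male+male X runs give 0.05 calls per cell (FP) and real
#female cells give 0.50 calls per cell (TP+FP), then FDR = 0.05/0.50 = 10%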
###FNR will be assessed by combining portions of male and female X chromosomes
#DONE#using samples where no CNVs called originally
#DONE#general
#losses simulated by replacing a random region in the female X with a random set of contiguous male X bins
#gains simulated by adding together the randomly selected contiguous regions
#use lowess bin counts so differences in sequencing depth shouldn't be an issue (adjust both for ploidy, then use the female ploidy throughout)
#DONE#again don't want to merge samples with extremely different QC metrics...(or do I???)
#DONEcalculate range-scaled distance matrix of QC values
#DONEfor each female sample, find the 10 males that are most similar to it
#DONEfilter down the list to only contain unique combinations
###THIS GIVES 1160 COMPARISONS
###Will this be enough for convincing comparisons? Might need to expand based on the output I get
#DONE#for each pair
#randomly select a cnv size, change direction, and start location
#will need to come up with a list of CNV sizes of interest
#change could only be gain or loss
#start could be anywhere that lets the whole length fit
#mash in male data however is appropriate (see above)
#save a lowess file and run CBS
#assess if the CNV was identified
#at least 50% reciprocal overlap
#correct copy number
#assess if it would be filtered out with my cutoffs or not
#save information: size, copy number, putativeID, filteredID
#repeat these steps some number of times to assess a range of size/location options with the combination
##statistics to assess
#DONEoverall FNR
#number of times sample was not ID'd / number of trials
#for both putative and filtered ID rates
#check for a size dependency of FNR
#if yes, I might want to adjust calculations for reporting purposes...or adjust filtering...we will see...
###calculate Euclidean distance of QC variables between two samples###
def getSampleDist(data1, data2):
thisSum = 0
for i in data1.keys():
thisSum += (data1[i] - data2[i]) ** 2
eucDist = thisSum ** 0.5
return eucDist
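#example (illustrative): with data1 = {'Reads': 0.2, 'MAPD': 0.5} and
#data2 = {'Reads': 0.5, 'MAPD': 0.1}, getSampleDist returns
#sqrt(0.3**2 + 0.4**2) = 0.5; both dicts must share the same keys#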
def getRefInfo(name, xStart, xEnd, absPosDict):
print '\n\n\nPreparing to assess FDR and FNR using the X chromosome of male and female samples'
segDtype = {'names': ('chrom', 'start', 'end'), 'formats': ('S15', 'S15', 'S15')}
###get a list of single cell samples###NEEDS UPDATING TO UNIVERSAL SYSTEM
infile = folders.getInfoFile(name)
data = np.loadtxt(infile, skiprows=1, usecols=[0,7,10], dtype={'names': ('name', 'cells', 'method'), 'formats': ('S10', 'int', 'S10')})
useCells = data[data['cells'] == 1]
useCells = useCells[useCells['method'] == 'VeriSeq']
useCells = list(useCells['name'])
###import currently used CNV filtering cutoffs###
cutoffDict = {}
##########need to change filepaths
#cutoffFile1 = '/filepath/mm10.smallThresholdCutoffs.25k.bowtie.k36.txt'
#cutoffFile2 = '/filepath/mm10.largeThresholdCutoffs.25k.bowtie.k36.txt'
for i in [cutoffFile1, cutoffFile2]:
data = np.loadtxt( i, skiprows=1, dtype={'names': ('size', 'intdist'), 'formats': ('int', 'float')} )
for j in data:
cutoffDict[j['size']] = j['intdist']
for i in range(1, min(cutoffDict.keys())):
cutoffDict[i] = 0.0
###Import QC data for all samples######NEEDS UPDATING TO UNIVERSAL SYSTEM
QCfile = folders.getQCfile(name)
qcData = np.loadtxt(QCfile, dtype={'names': ('Sample', 'Reads', 'MAPD', 'CS', 'Ploidy', 'Gender'), 'formats': ('S10', 'float', 'float', 'float', 'float', 'S1')}, skiprows=1, usecols=[0, 1, 2, 3, 4, 5])
qcData = qcData[qcData['Reads'] >= 600000]
qcData = qcData[qcData['MAPD'] <= 0.38]
qcData = qcData[qcData['CS'] >= 0.83]
ploidyDict = {x['Sample']: x['Ploidy'] for x in qcData}
sampleList = [x for x in qcData['Sample'] if x in useCells]
###Assess which samples have no CNVs on the X chromosome, and save references of the CNVs on Female X chromosomes###
maleCleanList = []
femaleCleanList = []
femaleCells = [x['Sample'] for x in qcData if x['Gender'] == 'F' and x['Sample'] in sampleList]
putativeFemaleCNVs = 0
putFemLarge = 0
putFemSmall = 0
filteredFemaleCNVs = 0
filtFemLarge = 0
filtFemSmall = 0
for i in sampleList:
normalCN = 1.
if i in femaleCells:
normalCN = 2.
##########need to update filepaths
#listFile = /filepath/cnvLists/' + i + '.CNVlist.txt'
data = np.loadtxt(listFile, dtype=segDtype, usecols=[0, 1, 2,])#, skiprows=1)
data = np.atleast_1d(data)
data = data[data['chrom'] == 'chrX']
filteredFemaleCNVs += len(data)
filtFemLarge += sum([1 for x in data if int(x['end']) - int(x['start']) > sizeBoundary])
filtFemSmall += sum([1 for x in data if int(x['end']) - int(x['start']) < sizeBoundary])
##########need to update filepaths
#segFile = '/filepath/' + i + 'k25.segments.txt'
data = np.loadtxt(segFile, dtype={'names': ('start', 'end', 'CN'), 'formats': (int, int, float)})
usableSegs = [x for x,y in enumerate(data) if y['end'] > y['start'] and xStart <= y['start'] <=xEnd]
data = data[usableSegs]
data['CN'] = [2.0 if np.isinf(x) else (2 ** x) * ploidyDict[i] for x in data['CN']]
cnvSegs = data[np.round(data['CN']) != normalCN]
if i not in femaleCells:
if len(cnvSegs) == 0:
maleCleanList.append(i)
else:
if len(cnvSegs) == 0:
femaleCleanList.append(i)
else:
mergeData = [cnvSegs[0]]
putativeFemaleCNVs += len(mergeData)
putFemLarge += sum([1 for x in mergeData if absPosDict[x[1]+1] - absPosDict[x[0]] > sizeBoundary])
putFemSmall += sum([1 for x in mergeData if absPosDict[x[1]+1] - absPosDict[x[0]] < sizeBoundary])
putativeFemaleRate = float(putativeFemaleCNVs) / float(len(femaleCells))
putLargeRate = float(putFemLarge) / float(len(femaleCells))
putSmallRate = float(putFemSmall) / float(len(femaleCells))
filteredFemaleRate = float(filteredFemaleCNVs) / float(len(femaleCells))
filtLargeRate = float(filtFemLarge) / float(len(femaleCells))
filtSmallRate = float(filtFemSmall) / float(len(femaleCells))
print '\t', len(femaleCells), 'female cells have'
print '\t\t', putativeFemaleCNVs, 'putative and', filteredFemaleCNVs, 'filtered X chromosome CNVs'
print '\t\tfor respective rates of', np.round(putativeFemaleRate, 3), 'and', np.round(filteredFemaleRate, 3), 'per cell'
print '\tThere are', len(maleCleanList), 'males and', len(femaleCleanList), 'females with no X chromosome CNVs'
###Determine which samples to combine based on their difference in QC variables###
    #I am calculating all pairwise differences in QC statistics, then combining pairs of samples with differences in the bottom 15% for males, and in the bottom 5% for females#
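    #each QC metric is first min-max scaled to [0, 1] across all passing samples, so Reads, MAPD, and CS contribute on comparable scales to the Euclidean distance#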
qcCompareDict = { x['Sample']: { y: (x[y] - min(qcData[y])) / (max(qcData[y]) - min(qcData[y])) for y in ['Reads', 'MAPD', 'CS'] } for x in qcData }
malePercent = 15
femalePercent = 5
allComparisons = sum([ [(y, z, getSampleDist(qcCompareDict[y], qcCompareDict[z])) for z in maleCleanList[x+1:]] for x,y in enumerate(maleCleanList[:-1]) ], [])
allComparisons = np.array(allComparisons, dtype={'names': ('cell1', 'cell2', 'dist'), 'formats': ('S10', 'S10', 'float')})
FDRcutoff = np.percentile(allComparisons['dist'], malePercent)
FDRcomparisons = allComparisons[allComparisons['dist'] <= FDRcutoff]
print '\t\t', len(FDRcomparisons), 'male X combinations for FDR testing with a distance range of', min(FDRcomparisons['dist']), np.mean(FDRcomparisons['dist']), FDRcutoff
allComparisons = sum([ [(y, z, getSampleDist(qcCompareDict[y], qcCompareDict[z])) for z in maleCleanList] for x,y in enumerate(femaleCleanList) ], [])
allComparisons = np.array(allComparisons, dtype={'names': ('cell1', 'cell2', 'dist'), 'formats': ('S10', 'S10', 'float')})
FNRcutoff = np.percentile(allComparisons['dist'], femalePercent)
FNRcomparisons = allComparisons[allComparisons['dist'] <= FNRcutoff]
print '\t\t', len(FNRcomparisons), 'female X combinations for FNR testing with a distance range of', min(FNRcomparisons['dist']), np.mean(FNRcomparisons['dist']), FNRcutoff
femaleRateDict = {
'putative': {
'all': putativeFemaleRate,
'large': putLargeRate,
'small': putSmallRate,
},
'filtered': {
'all': filteredFemaleRate,
'large': filtLargeRate,
'small': filtSmallRate,
},
}
return cutoffDict, ploidyDict, FDRcomparisons, FNRcomparisons, femaleRateDict
#loads lowess bin count data, adjusts it to copy number form, and returns it#
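#e.g. a lowess log2 value of 0.0 maps back to CN = ploidy, and -1.0 to half the ploidy#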
def loadCountData(name, cell, numBins, binLocs, ploidy):
    infile = folders.getLowessFile(name, cell, numBins)
    data = np.loadtxt(infile)
data = (2 ** data[binLocs]) * ploidy
return data
def loadSegData(segFile, usePloidy):
    segDtype = {'names': ('start', 'end', 'CN'), 'formats': ('int', 'int', 'float')}
    segData = np.loadtxt(segFile, dtype=segDtype)
    segData = segData[segData['CN'] != np.inf]
    segData = segData[segData['CN'] != -np.inf]
    segData['CN'] = (2 ** segData['CN']) * usePloidy
if len(segData) > 1:
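        #merge adjacent segments whose rounded copy numbers agree, averaging
        #their CN values weighted by segment length#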
mergeData = []
mergeData.append(segData[0])
for i in segData[1:]:
newData = i
if np.round(i['CN']) == np.round(mergeData[-1][2]):
old = mergeData.pop()
newCN = np.average([old[2], i['CN']], weights=[old[1]-old[0], i['end'] - i['start']])
newData = (old[0], i['end'], newCN)
mergeData.append(newData)
segData = np.array(mergeData, dtype=segDtype)
return segData
def runOneFDR(name, numBins, xBins, absPosDict, overwrite, cell1, cell2, ploidy1, ploidy2):
matlabName = cell1 + 'V' + cell2
##########Probably need to update filename
# refFile = folders.getOutDir(name, 'prep') + 'xChromRef.txt'
    segDtype = {'names': ('start', 'end', 'CN'), 'formats': ('int', 'int', 'float')}
segFile = folders.getOutDir(name, 'XchromFDR') + matlabName + '.segments.txt'
#only combine samples and run CBS if necessary#
if overwrite or not os.path.exists(segFile):
###Merge X chromosome bin counts and write to file###
data1 = loadCountData(name, cell1, numBins, xBins, ploidy1)
data2 = loadCountData(name, cell2, numBins, xBins, ploidy2)
mergedXdata = data1 + data2
testXdata = np.log2(mergedXdata / np.median(mergedXdata))
##########Probably need to update filename
# dataFile = folders.getOutDir(name, 'XchromFDR') + matlabName + '.lowessBinCounts.txt'
np.savetxt(dataFile, testXdata)
###write Matlab CBS script to file###
##########Probably need to update filename
# scriptFile = folders.getOutDir(name, 'XchromFDR') + matlabName + '.m'
SCRIPT = open(scriptFile, 'w')
importLines = "sampleName = genvarname('" + matlabName + "');\n"
importLines += "sampleData = importdata('" + dataFile + "');\n"
importLines += "ref = importdata('" + refFile + "');\n"
refLines = "chroms = zeros(length(ref.textdata), 1);\n\n"
refLines += "for i = 1:length(ref.textdata)\n chrom = textscan(ref.textdata{i},'%s %s','delimiter','r');\n chroms(i) = 20;\nend\n\n"
refLines += "bins = ref.data;\nlogLowess = sampleData;\nclear ref sampleData\n\n"
CBSlines = "cbsInput = [chroms, bins, logLowess];\ncbsOutput = cghcbs(cbsInput, 'ALPHA', " + str(0.01) + ", 'PERMUTATIONS', 10000, 'STOPPINGRULE', false, 'SHOWPLOT', false);\n\n"
CBSlines += "numOfSegments = 0;\nfor i = 1:length(cbsOutput.SegmentData)\n for j = 1:length(cbsOutput.SegmentData(1, i).Mean)\n numOfSegments = numOfSegments + 1;\n end\nend\n\n"
CBSlines += "segments = zeros(length(numOfSegments), 3);\nk = 0;\nfor i = 1:length(cbsOutput.SegmentData)\n for j = 1:length(cbsOutput.SegmentData(1, i).Mean)\n k = k + 1;\n segments(k, 1) = cbsOutput.SegmentData(1, i).Start(j);\n segments(k, 2) = cbsOutput.SegmentData(1, i).End(j);\n segments(k, 3) = cbsOutput.SegmentData(1, i).Mean(j);\n end\nend\n\n"
CBSlines += "segFile = fopen([sampleName '.segments.txt'], 'w');\nfor i = 1:numOfSegments\n fprintf(segFile,'%d\t',segments(i,1));\n fprintf(segFile,'%d\t',segments(i,2));\n fprintf(segFile,'%.15f\\n',segments(i,3));\nend\n\n"
lastLine = "quit\n"
allLines = '\n'.join([importLines, refLines, CBSlines, lastLine])
SCRIPT.write(allLines)
SCRIPT.close()
###run CBS###
##########THIS MAY BE HARD TO RUN ON MANY SYSTEMS, need access to Matlab from the command line
matlabCommand = "matlab -nodisplay -r " + matlabName
##########Probably need to update filename
# stderrfile = folders.getOutDir(name, 'XchromFDR') + matlabName + '.stderr'
cmd = shlex.split(matlabCommand)
stdout = open(stderrfile, 'w')
p = sub.Popen(cmd, stdout=stdout, stderr=sub.STDOUT)
p.wait()
os.remove('./' + matlabName + '.m')
os.remove('./' + matlabName + '.stderr')
###assess the presence of false positive CNVs###
segData = loadSegData(segFile, 2.)
segCNVs = segData[np.round(segData['CN']) != 2.]
segInfo = []
for i in segCNVs:
binStart = absPosDict[i['start']]
binEnd = absPosDict[i['end']]
size = binEnd - binStart
CN = np.round(i['CN'])
intdist = abs(i['CN'] - CN)
segInfo.append( (matlabName, binStart, binEnd, CN, size, intdist) )
# print '\t', segInfo
return segInfo
def runFDR(name, overwrite, FDRcomparisons, femaleRateDict, ploidyDict, xBins, numBins, absPosDict, cutoffDict):
print '\n\n\nAssessing FDR by combining male X chromosome bin counts'
##########probably need to update filename
# os.chdir(folders.getOutDir(name, 'XchromFDR'))
resultDtype = {'names': ('cells', 'start', 'end', 'CN', 'size', 'intdist'), 'formats': ('S20', 'int', 'int', 'float', 'int', 'float')}
distanceDict = {x[0] + 'V' + x[1]: x[2] for x in FDRcomparisons}
argList = [(name, numBins, xBins, absPosDict, overwrite, x[0], x[1], ploidyDict[x[0]], ploidyDict[x[1]]) for x in FDRcomparisons]
pool = mp.Pool(mp.cpu_count())
processes = [ pool.apply_async(runOneFDR, args=x) for x in argList ]
pool.close()
results = len(argList) * [ [] ]
for i,j in enumerate(processes):
j.wait()
results[i] = j.get()
print '\nFinished segmenting merged male X chromosome bin counts'
resultsArray = np.array(sum([x for x in results if len(x) > 0], []), dtype=resultDtype)
putativeCNVnum = len(resultsArray)
putativeCNVrate = float(putativeCNVnum) / float(len(FDRcomparisons))
putLargeRate = sum([1. for x in resultsArray if x['size'] > sizeBoundary]) / float(len(FDRcomparisons))
putSmallRate = sum([1. for x in resultsArray if x['size'] < sizeBoundary]) / float(len(FDRcomparisons))
putFDRall = putativeCNVrate / femaleRateDict['putative']['all']
putFDRlarge = putLargeRate / femaleRateDict['putative']['large']
putFDRsmall = putSmallRate / femaleRateDict['putative']['small']
filteredResultsArray = np.array([x for x in resultsArray if cutoffDict[x['size']] >= x['intdist']], dtype=resultsArray.dtype)
filteredCNVnum = len(filteredResultsArray)
filteredCNVrate = float(filteredCNVnum) / float(len(FDRcomparisons))
filtLargeRate = sum([1. for x in filteredResultsArray if x['size'] > sizeBoundary]) / float(len(FDRcomparisons))
filtSmallRate = sum([1. for x in filteredResultsArray if x['size'] < sizeBoundary]) / float(len(FDRcomparisons))
filtFDRall = filteredCNVrate / femaleRateDict['filtered']['all']
filtFDRlarge = filtLargeRate / femaleRateDict['filtered']['large']
filtFDRsmall = filtSmallRate / femaleRateDict['filtered']['small']
percentDiffAll = (filtFDRall - putFDRall) / putFDRall
percentDiffLarge = (filtFDRlarge - putFDRlarge) / putFDRlarge
percentDiffSmall = (filtFDRsmall - putFDRsmall) / putFDRsmall
foldDiffAll = putFDRall / filtFDRall
try:
foldDiffLarge = putFDRlarge / filtFDRlarge
except ZeroDivisionError:
foldDiffLarge = 'infinity'
foldDiffSmall = putFDRsmall / filtFDRsmall
print '\tAfter', len(FDRcomparisons), 'iterations'
print '\t\t', putativeCNVnum, 'total putative CNVs were identified for a rate of', putativeCNVrate, 'CNVs per cell'
print '\t\t\tComparing to', femaleRateDict['putative']['all'], 'this results in an FDR of', np.round(100 * putFDRall, 3), 'percent of all putative diploid X chrom calls'
print '\t\t\tComparing', putLargeRate, 'to', femaleRateDict['putative']['large'], 'this results in an FDR of', np.round(100 * putFDRlarge, 3), 'percent of large putative diploid X chrom calls'
print '\t\t\tComparing', putSmallRate, 'to', femaleRateDict['putative']['small'], 'this results in an FDR of', np.round(100 * putFDRsmall, 3), 'percent of small putative diploid X chrom calls'
print '\t\t', filteredCNVnum, 'CNVs remain after filtering for a rate of', filteredCNVrate, 'CNVs per cell'
print '\t\t\tComparing to', femaleRateDict['filtered']['all'], 'results in an FDR of', np.round(100 * filtFDRall, 3), 'percent of all filtered diploid X chrom calls'
print '\t\t\tComparing', filtLargeRate, 'to', femaleRateDict['filtered']['large'], 'results in an FDR of', np.round(100 * filtFDRlarge, 3), 'percent of large filtered diploid X chrom calls'
print '\t\t\tComparing', filtSmallRate, 'to', femaleRateDict['filtered']['small'], 'results in an FDR of', np.round(100 * filtFDRsmall, 3), 'percent of small filtered diploid X chrom calls'
print '\t\tPercent change and fold difference of FDR from filtering'
print '\t\t\tAll CNVs\t', 100 * percentDiffAll, '\t', foldDiffAll
print '\t\t\tLarge CNVs\t', 100 * percentDiffLarge, '\t', foldDiffLarge
    print '\t\t\tSmall CNVs\t', 100 * percentDiffSmall, '\t', foldDiffSmall
def runOneFNR(female, male, distance, femalePloidy, malePloidy, repeats, name, numBins, xBins, absPosDict, cutoffDict):
###misc imports and variables###
##########Probably need to update filename
#refFile = folders.getOutDir(name, 'prep') + 'xChromRef.txt'
#parameterOptions#
CNoptions = [1, 3]
# sizeOptions = [np.arange(3, 11), np.arange(11, 28), np.arange(28, 101), np.arange(101, 1315)]
sizeOptions = [[3]]
while sizeOptions[-1][-1] != 1314:
prevSize = len(sizeOptions[-1])
thisSize = prevSize * 2
thisStart = sizeOptions[-1][-1]+1
thisEnd = thisStart + thisSize
theseOptions = np.arange(thisStart, min(thisEnd, 1315))
sizeOptions.append(theseOptions)
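    #sizeOptions thus grows geometrically ([3], [4,5], [6..9], ...) up to bin
    #1314; choosing a random group and then a random member samples small CNV
    #sizes about as often as large ones, rather than uniformly over raw sizes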
comboName = female + 'V' + male
femaleData = loadCountData(name, female, numBins, xBins, femalePloidy)
maleData = loadCountData(name, male, numBins, xBins, malePloidy)
    #NOTE the end index is one past the last modified bin, to match Python slice indexing#
#starts and ends are in the xChrom bin location, starting from 0 on the chromosome#
resultDtype = {'names': ('name', 'distance', 'rep',
'start', 'end', 'size', 'CN',
'segStart', 'segEnd', 'segSize', 'segCN', 'segIntDist', 'putativeID', 'filteredID'),
'formats': ('S20', 'float', 'int',
'int', 'int', 'int', 'int',
'int', 'int', 'int', 'float', 'float', 'bool', 'bool')}
resultsArray = np.zeros(repeats, dtype = resultDtype)
##########Probably need to update filename
# refoutfile = folders.getOutDir(name, 'XchromFNR') + comboName + '.reference.txt'
if os.path.exists(refoutfile):
REF = open(refoutfile, 'a+')
allData = REF.read()
allDataList = allData.split('\n')[1:]
for i,j in enumerate(allDataList):
if i == len(resultsArray):
break
repData = j.split('\t')
resultsArray[i]['name'] = comboName
resultsArray[i]['distance'] = distance
resultsArray[i]['rep'] = int(repData[0])
resultsArray[i]['start'] = int(repData[1])
resultsArray[i]['end'] = int(repData[2])
resultsArray[i]['size'] = int(repData[2]) - int(repData[1])
resultsArray[i]['CN'] = int(repData[3])
REF.flush()
else:
REF = open(refoutfile, 'w')
REF.write('Repeat\tStart\tEnd\tCN')
for i,j in enumerate(resultsArray):
thisRep = i+1
matlabName = comboName + 'R' + str(thisRep)
##########Probably need to update filename
# segFile = folders.getOutDir(name, 'XchromFNR') + matlabName + '.segments.txt'
###processing only if this simulation has not already been run (will need to delete all files if I change strategies again)###
if j['rep'] != thisRep:
testData = np.copy(femaleData)
j['name'] = comboName
j['distance'] = distance
j['rep'] = thisRep
###determine simulation iteration variables (start, stop, ((size)), copy number)###
j['CN'] = random.choice(CNoptions)
sizeGroup = random.choice(sizeOptions)
j['size'] = random.choice(sizeGroup)
j['start'] = random.randint(0, len(xBins) - j['size'])
j['end'] = j['start'] + j['size']
REF.write('\n')
REF.write( str( '\t'.join( map(str, [thisRep, j['start'], j['end'], j['CN']]) ) ) )
###merge bin data, re-log-correct and save it###
#losses simulated by replacing a random region in the female X with a random set of contiguous male X bins
#gains simulated by adding together the randomly selected contiguous regions
if j['CN'] == 1:
testData[j['start']:j['end']] = maleData[j['start']:j['end']]
elif j['CN'] == 3:
testData[j['start']:j['end']] = femaleData[j['start']:j['end']] + maleData[j['start']:j['end']]
testData = np.log2(testData / femalePloidy)
##########Probably need to update filename
# dataFile = folders.getOutDir(name, 'XchromFNR') + matlabName + '.lowessBinCounts.txt'
np.savetxt(dataFile, testData)
###write and run matlab script###
importLines = "sampleName = genvarname('" + matlabName + "');\n"
importLines += "sampleData = importdata('" + dataFile + "');\n"
importLines += "ref = importdata('" + refFile + "');\n"
refLines = "chroms = zeros(length(ref.textdata), 1);\n\n"
refLines += "for i = 1:length(ref.textdata)\n chrom = textscan(ref.textdata{i},'%s %s','delimiter','r');\n chroms(i) = 20;\nend\n\n"
refLines += "bins = ref.data;\nlogLowess = sampleData;\nclear ref sampleData\n\n"
CBSlines = "cbsInput = [chroms, bins, logLowess];\ncbsOutput = cghcbs(cbsInput, 'ALPHA', " + str(0.01) + ", 'PERMUTATIONS', 10000, 'STOPPINGRULE', false, 'SHOWPLOT', false);\n\n"
CBSlines += "numOfSegments = 0;\nfor i = 1:length(cbsOutput.SegmentData)\n for j = 1:length(cbsOutput.SegmentData(1, i).Mean)\n numOfSegments = numOfSegments + 1;\n end\nend\n\n"
CBSlines += "segments = zeros(length(numOfSegments), 3);\nk = 0;\nfor i = 1:length(cbsOutput.SegmentData)\n for j = 1:length(cbsOutput.SegmentData(1, i).Mean)\n k = k + 1;\n segments(k, 1) = cbsOutput.SegmentData(1, i).Start(j);\n segments(k, 2) = cbsOutput.SegmentData(1, i).End(j);\n segments(k, 3) = cbsOutput.SegmentData(1, i).Mean(j);\n end\nend\n\n"
CBSlines += "segFile = fopen([sampleName '.segments.txt'], 'w');\nfor i = 1:numOfSegments\n fprintf(segFile,'%d\t',segments(i,1));\n fprintf(segFile,'%d\t',segments(i,2));\n fprintf(segFile,'%.15f\\n',segments(i,3));\nend\n\n"
lastLine = "quit\n"
allLines = '\n'.join([importLines, refLines, CBSlines, lastLine])
##########Probably need to update filename
# scriptFile = folders.getOutDir(name, 'XchromFNR') + matlabName + '.m'
SCRIPT = open(scriptFile, 'w')
SCRIPT.write(allLines)
SCRIPT.close()
##########Probably need to update filename
# stderrfile = folders.getOutDir(name, 'XchromFNR') + matlabName + '.stderr'
stdout = open(stderrfile, 'w')
matlabCommand = "matlab -nodisplay -r " + matlabName
cmd = shlex.split(matlabCommand)
p = sub.Popen(cmd, stdout=stdout, stderr=sub.STDOUT)
p.wait()
os.remove('./' + matlabName + '.m')
os.remove('./' + matlabName + '.stderr')
###assess if each CNV was identified and return information###
# (cells, simNumber, start, stop, size, copy number, segStart, segStop, segSize, segCN, segIntdist, putativeID, filteredID)#
segData = loadSegData(segFile, femalePloidy)
segData['start'] = [absPosDict[x] - xBins[0] for x in segData['start']]
segData['end'] = [absPosDict[x] - xBins[0] for x in segData['end']]
cnvSeg = segData[np.argmax([min(x['end'], j['end']) - max(x['start'], j['start']) for x in segData])]
j['segStart'] = cnvSeg['start']
j['segEnd'] = cnvSeg['end']
j['segSize'] = cnvSeg['end'] - cnvSeg['start']
j['segCN'] = cnvSeg['CN']
j['segIntDist'] = abs(cnvSeg['CN'] - np.round(cnvSeg['CN']))
if 0.5 <= float(j['segSize']) / float(j['size']) <= 2 and np.round(j['segCN']) == j['CN']:
j['putativeID'] = True
if cutoffDict[j['segSize']] >= j['segIntDist']:
j['filteredID'] = True
REF.close()
return list(resultsArray)
def runFNR(name, numBins, repeats, overwrite, ploidyDict, xBins, absPosDict, cutoffDict, FNRcomparisons):
print '\n\n\nAssessing FNR by spiking male X chromosome bin counts to the female X chromosome'
##########Probably need to update filename
# os.chdir(folders.getOutDir(name, 'XchromFNR'))
resultDtype = {'names': ('name', 'distance', 'rep',
'start', 'end', 'size', 'CN',
'segStart', 'segEnd', 'segSize', 'segCN', 'segIntDist', 'putativeID', 'filteredID'),
'formats': ('S20', 'float', 'int',
'int', 'int', 'int', 'int',
'int', 'int', 'int', 'float', 'float', 'bool', 'bool')}
##########Probably need to update filename
# outfilename = folders.getOutDir(name, 'stats') + 'xChrom.FNRtestData.txt'
    thisHeader = 'Comparison\tDistance\tRepeat\tSpikeinStart\tSpikeinEnd\tSpikeinSize\tSpikeinCN\tSegStart\tSegEnd\tSegSize\tSegCN\tSegIntDist\tSegPutativeID\tSegFilteredID'
if overwrite or not os.path.exists(outfilename):
argList = [(x[0], x[1], x[2], ploidyDict[x[0]], ploidyDict[x[1]], repeats, name, numBins, xBins, absPosDict, cutoffDict,) for x in FNRcomparisons]
pool = mp.Pool(mp.cpu_count())
        processes = [ pool.apply_async(runOneFNR, args=x) for x in argList ]
pool.close()
results = len(argList) * [ [] ]
for i,j in enumerate(processes):
j.wait()
results[i] = j.get()
print '\nFinished segmenting male X spike-ins'
resultsArray = np.array(sum(results, []), dtype=resultDtype)
np.savetxt(outfilename, resultsArray, fmt='%s', delimiter='\t', comments='', header=thisHeader)
CNVnum = float(len(resultsArray))
putativeID = resultsArray[resultsArray['putativeID'] == True]
filteredID = resultsArray[resultsArray['filteredID'] == True]
putativeFNR = 1. - (float(len(putativeID)) / CNVnum)
filteredFNR = 1. - (float(len(filteredID)) / CNVnum)
else:
        resultsArray = np.genfromtxt(outfilename, skip_header=1, dtype=resultDtype)
def getFilteredID(putative, bins, intdist):
thisTest = False
if putative and cutoffDict[bins] >= intdist:
thisTest = True
return thisTest
resultsArray['filteredID'] = [ getFilteredID(x['putativeID'], x['segSize'], x['segIntDist']) for x in resultsArray]
np.savetxt(outfilename, resultsArray, fmt='%s', delimiter='\t', comments='', header=thisHeader)
failData = resultsArray[resultsArray['putativeID'] == False]
IDdata = resultsArray[resultsArray['putativeID'] == True]
filtData = resultsArray[resultsArray['filteredID'] == True]
putFNRall = 100 * (1. - (float(len(IDdata)) / float(len(resultsArray))))
filtFNRall = 100 * (1. - (float(len(filtData)) / float(len(resultsArray))))
largeData = resultsArray[resultsArray['size'] > sizeBoundary]
largePutID = largeData[largeData['putativeID'] == True]
largeFiltID = largeData[largeData['filteredID'] == True]
putFNRlarge = 100 * (1. - (float(len(largePutID)) / float(len(largeData))))
filtFNRlarge = 100 * (1. - (float(len(largeFiltID)) / float(len(largeData))))
smallData = resultsArray[resultsArray['size'] < sizeBoundary]
smallPutID = smallData[smallData['putativeID'] == True]
smallFiltID = smallData[smallData['filteredID'] == True]
putFNRsmall = 100 * (1. - (float(len(smallPutID)) / float(len(smallData))))
filtFNRsmall = 100 * (1. - (float(len(smallFiltID)) / float(len(smallData))))
print '\tAssessing all', len(resultsArray), 'CNVs'
print '\t\t', len(IDdata), 'putative CNVs were identified for a FNR of', putFNRall, '%'
print '\t\t', len(filtData), 'filtered CNVs were identified for a FNR of', filtFNRall, '%'
print '\t\t', np.round(100 * ((filtFNRall - putFNRall) / putFNRall), 3), '% increase and', np.round(filtFNRall / putFNRall, 3), 'fold change after filtering'
print '\tAssessing', len(largeData), 'large CNVs'
print '\t\t', len(largePutID), 'putative CNVs were identified for a FNR of', putFNRlarge, '%'
print '\t\t', len(largeFiltID), 'filtered CNVs were identified for a FNR of', filtFNRlarge, '%'
print '\t\t', np.round(100 * ((filtFNRlarge - putFNRlarge) / putFNRlarge), 3), '% increase and', np.round(filtFNRlarge / putFNRlarge, 3), 'fold change after filtering'
print '\tAssessing', len(smallData), 'small CNVs'
print '\t\t', len(smallPutID), 'putative CNVs were identified for a FNR of', putFNRsmall, '%'
print '\t\t', len(smallFiltID), 'filtered CNVs were identified for a FNR of', filtFNRsmall, '%'
print '\t\t', np.round(100 * ((filtFNRsmall - putFNRsmall) / putFNRsmall), 3), '% increase and', np.round(filtFNRsmall / putFNRsmall, 3), 'fold change after filtering'
def runAll(name, numBins, repeats, xStart, xEnd, xBins, absPosDict, overwrite):
cutoffDict, ploidyDict, FDRcomparisons, FNRcomparisons, femaleRateDict = getRefInfo(name, xStart, xEnd, absPosDict)
runFDR(name, overwrite, FDRcomparisons, femaleRateDict, ploidyDict, xBins, numBins, absPosDict, cutoffDict)
runFNR(name, numBins, repeats, overwrite, ploidyDict, xBins, absPosDict, cutoffDict, FNRcomparisons)
sizeBoundary = 25.5  #####size threshold separating "small" from "large" CNVs
##########LEFTOVER FROM ORIGINAL CODE, will hopefully be replaced by fixing comments above
folders = cfg.Folders()
import time
import objc
from Foundation import NSKeyValueObservingOptionNew, NSKeyValueObservingOptionOld, NSNotFound
from AppKit import *
from nsSubclasses import getNSSubclass
from vanillaBase import VanillaBaseObject, VanillaError, VanillaCallbackWrapper
# first, determine which column autosizing method is needed.
# in 10.4, NSTableView.setAutoresizesAllColumnsToFit was
# deprecated. The new way for handling this is via masks.
try:
NSTableViewUniformColumnAutoresizingStyle
NSTableColumn.setResizingMask_
except (NameError, AttributeError):
_haveResizingMasks = False
else:
_haveResizingMasks = True
class VanillaTableViewSubclass(NSTableView):
def keyDown_(self, event):
didSomething = self.vanillaWrapper()._keyDown(event)
if not didSomething:
super(VanillaTableViewSubclass, self).keyDown_(event)
def textDidEndEditing_(self, notification):
info = notification.userInfo()
if info["NSTextMovement"] in [NSReturnTextMovement, NSTabTextMovement, NSBacktabTextMovement]:
# This is ugly, but just about the only way to do it.
# NSTableView is determined to select and edit something else,
# even the text field that it just finished editing, unless we
# mislead it about what key was pressed to end editing.
info = dict(info) # make a copy
info["NSTextMovement"] = NSIllegalTextMovement
newNotification = NSNotification.notificationWithName_object_userInfo_(
notification.name(),
notification.object(),
info)
super(VanillaTableViewSubclass, self).textDidEndEditing_(newNotification)
self.window().makeFirstResponder_(self)
else:
super(VanillaTableViewSubclass, self).textDidEndEditing_(notification)
class _VanillaTableViewSubclass(VanillaTableViewSubclass):
def init(self):
from warnings import warn
warn(DeprecationWarning("_VanillaTableViewSubclass is deprecated. Use VanillaTableViewSubclass"))
return super(_VanillaTableViewSubclass, self).init()
class VanillaArrayControllerObserver(NSObject):
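# a minimal KVO observer: Cocoa bindings call
# observeValueForKeyPath_ofObject_change_context_ whenever an observed key
# path changes; it simply forwards to the Python callback that the wrapping
# List stores on _targetMethod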
def observeValueForKeyPath_ofObject_change_context_(self, keyPath, obj, change, context):
if hasattr(self, "_targetMethod") and self._targetMethod is not None:
self._targetMethod()
class _VanillaArrayControllerObserver(VanillaArrayControllerObserver):
def init(self):
from warnings import warn
warn(DeprecationWarning("_VanillaArrayControllerObserver is deprecated. Use VanillaArrayControllerObserver"))
return super(_VanillaArrayControllerObserver, self).init()
class VanillaArrayController(NSArrayController):
def tableView_writeRowsWithIndexes_toPasteboard_(self,
tableView, indexes, pboard):
vanillaWrapper = tableView.vanillaWrapper()
settings = vanillaWrapper._dragSettings
if settings is None:
return False
indexes = list(vanillaWrapper._iterIndexSet(indexes))
indexes = vanillaWrapper._getUnsortedIndexesFromSortedIndexes(indexes)
packCallback = settings["callback"]
if packCallback is not None:
objects = packCallback(vanillaWrapper, indexes)
if not isinstance(objects, NSArray):
objects = NSArray.arrayWithArray_(objects)
else:
objects = NSMutableArray.array()
for index in indexes:
obj = vanillaWrapper[index]
objects.addObject_(obj)
dragType = settings["type"]
pboard.declareTypes_owner_([dragType], self)
pboard.setPropertyList_forType_(objects.description(), dragType)
return True
def _handleDrop(self, isProposal, tableView, draggingInfo, row, dropOperation):
vanillaWrapper = tableView.vanillaWrapper()
draggingSource = draggingInfo.draggingSource()
sourceForCallback = draggingSource
if hasattr(draggingSource, "vanillaWrapper") and getattr(draggingSource, "vanillaWrapper") is not None:
sourceForCallback = getattr(draggingSource, "vanillaWrapper")()
# make the info dict
dropOnRow = dropOperation == NSTableViewDropOn
dropInformation = dict(isProposal=isProposal, dropOnRow=dropOnRow, rowIndex=row, data=None, source=sourceForCallback)
# drag from self
if draggingSource == tableView:
if vanillaWrapper._selfDropSettings is None:
return NSDragOperationNone
settings = vanillaWrapper._selfDropSettings
return self._handleDropBasedOnSettings(settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation)
# drag from same window
window = tableView.window()
if window is not None and draggingSource is not None and window == draggingSource.window():
if vanillaWrapper._selfWindowDropSettings is None:
return NSDragOperationNone
settings = vanillaWrapper._selfWindowDropSettings
return self._handleDropBasedOnSettings(settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation)
# drag from same document
document = tableView.window().document()
if document is not None and draggingSource is not None and document == draggingSource.window().document():
if vanillaWrapper._selfDocumentDropSettings is None:
return NSDragOperationNone
settings = vanillaWrapper._selfDocumentDropSettings
return self._handleDropBasedOnSettings(settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation)
# drag from same application
applicationWindows = NSApp().windows()
if draggingSource is not None and draggingSource.window() in applicationWindows:
if vanillaWrapper._selfApplicationDropSettings is None:
return NSDragOperationNone
settings = vanillaWrapper._selfApplicationDropSettings
return self._handleDropBasedOnSettings(settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation)
# fall back to drag from other application
if vanillaWrapper._otherApplicationDropSettings is None:
return NSDragOperationNone
settings = vanillaWrapper._otherApplicationDropSettings
return self._handleDropBasedOnSettings(settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation)
def _handleDropBasedOnSettings(self, settings, vanillaWrapper, dropOnRow, draggingInfo, dropInformation):
# handle drop position
validDropPosition = self._validateDropPosition(settings, dropOnRow)
if not validDropPosition:
return NSDragOperationNone
# unpack data
dropInformation["data"] = self._unpackPboard(settings, draggingInfo)
# call the callback
result = settings["callback"](vanillaWrapper, dropInformation)
if result:
return settings.get("operation", NSDragOperationCopy)
return NSDragOperationNone
def _validateDropPosition(self, settings, dropOnRow):
if dropOnRow and not settings.get("allowsDropOnRows", False):
return False
if not dropOnRow and not settings.get("allowsDropBetweenRows", True):
return False
return True
def _unpackPboard(self, settings, draggingInfo):
pboard = draggingInfo.draggingPasteboard()
data = pboard.propertyListForType_(settings["type"])
if isinstance(data, (NSString, objc.pyobjc_unicode)):
data = data.propertyList()
return data
def tableView_validateDrop_proposedRow_proposedDropOperation_(self,
tableView, draggingInfo, row, dropOperation):
return self._handleDrop(True, tableView, draggingInfo, row, dropOperation)
def tableView_acceptDrop_row_dropOperation_(self,
tableView, draggingInfo, row, dropOperation):
return self._handleDrop(False, tableView, draggingInfo, row, dropOperation)
# 10.6
def tableView_objectValueForTableColumn_row_(self,
tableView, column, row):
content = self.content()
columnID = column.identifier()
item = content[row]
if isinstance(item, NSDictionary):
if columnID not in item:
return
else:
return item[columnID]
else:
return getattr(item, columnID)()
def numberOfRowsInTableView_(self, view):
return len(self.content())
class _VanillaArrayController(VanillaArrayController):
def init(self):
from warnings import warn
warn(DeprecationWarning("_VanillaArrayController is deprecated. Use VanillaArrayController"))
return super(_VanillaArrayController, self).init()
class List(VanillaBaseObject):
"""
A control that shows a list of items. These lists can contain one or more columns.
A single column example::
from vanilla import *
class ListDemo(object):
def __init__(self):
self.w = Window((100, 100))
self.w.myList = List((0, 0, -0, -0), ["A", "B", "C"],
selectionCallback=self.selectionCallback)
self.w.open()
def selectionCallback(self, sender):
print sender.getSelection()
ListDemo()
A multiple column example::
from vanilla import *
class ListDemo(object):
def __init__(self):
self.w = Window((100, 100))
self.w.myList = List((0, 0, -0, -0),
[{"One": "A", "Two": "a"}, {"One": "B", "Two": "b"}],
columnDescriptions=[{"title": "One"}, {"title": "Two"}],
selectionCallback=self.selectionCallback)
self.w.open()
def selectionCallback(self, sender):
print sender.getSelection()
ListDemo()
List objects behave like standard Python lists. For example, given this List::
self.w.myList = List((10, 10, 200, 100), ["A", "B", "C"])
The following Python list methods work::
# Getting the length of the List.
>>> len(self.w.myList)
3
# Retrieving an item or items from a List.
>>> self.w.myList[1]
"B"
>>> self.w.myList[:2]
["A", "B"]
# Setting an item in a List.
>>> self.w.myList[1] = "XYZ"
>>> self.w.myList.get()
["A", "XYZ", "C"]
# Deleting an item at an index in a List.
>>> del self.w.myList[1]
>>> self.w.myList.get()
["A", "C"]
# Appending an item to a List.
>>> self.w.myList.append("Z")
>>> self.w.myList.get()
["A", "B", "C", "Z"]
# Removing the first occurrence of an item in a List.
>>> self.w.myList.remove("A")
>>> self.w.myList.get()
["B", "C"]
# Getting the index for the first occurrence of an item in a List.
>>> self.w.myList.index("B")
1
# Inserting an item into a List.
>>> self.w.myList.insert(1, "XYZ")
>>> self.w.myList.get()
["A", "XYZ", "B", "C"]
# Extending a List.
>>> self.w.myList.extend(["X", "Y", "Z"])
>>> self.w.myList.get()
["A", "B", "C", "X", "Y", "Z"]
# Iterating over a List.
>>> for i in self.w.myList:
...     print i
A
B
C
**posSize** Tuple of form *(left, top, width, height)* representing the
position and size of the list.
**items** The items to be displayed in the list. In the case of multiple
column lists, this should be a list of dictionaries with the data for
each column keyed by the column key as defined in columnDescriptions.
If you intend to use a dataSource, *items* must be *None*.
**dataSource** A Cocoa object supporting the *NSTableDataSource*
protocol. If *dataSource* is given, *items* must be *None*.
**columnDescriptions** An ordered list of dictionaries describing the
columns. This is only necessary for multiple column lists.
+--------------------------------+--------------------------------------------------------------------------------+
| *"title"* | The title to appear in the column header. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"key"* (optional) | The key from which this column should get |
| | its data from each dictionary in *items*. If |
| | nothing is given, the key will be the string |
| | given in *title*. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"formatter"* (optional) | An `NSFormatter` <http://tinyurl.com/NSFormatter>`_ |
| | for cntrolling the display and input of the |
| | column's cells. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"cell"* (optional) | A cell type to be displayed in the column. |
| | If nothing is given, a text cell is used. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"editable"* (optional) | Enable or disable editing in the column. If |
| | nothing is given, it will follow the |
| | editability of the rest of the list. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"width"* (optional) | The width of the column. In OS 10.3 and |
| | lower the width must be defined for *all* |
| | columns if the width is defined for one |
| | column. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"typingSensitive"* (optional) | A boolean representing that this column |
| | should be the column that responds to user |
| | key input. Only one column can be flagged as |
| | True. If no column is flagged, the first |
| | column will automatically be flagged. |
+--------------------------------+--------------------------------------------------------------------------------+
| *"binding"* (optional)         | A string indicating which `binding object <http://tinyurl.com/CocoaBindings>`_ |
| | the column's cell should be bound to. By |
| | default, this is "value." You should only |
| | override this in very specific cases. |
+--------------------------------+--------------------------------------------------------------------------------+
**showColumnTitles** Boolean representing if the column titles should be shown or not.
Column titles will not be shown in single column lists.
**selectionCallback** Callback to be called when the selection in the list changes.
**doubleClickCallback** Callback to be called when an item is double clicked.
**editCallback** Callback to be called after an item has been edited.
**enableDelete** A boolean representing if items in the list can be deleted via the interface.
**enableTypingSensitivity** A boolean representing if typing in the list will jump to the
closest match as the entered keystrokes. *Available only in single column lists.*
**allowsMultipleSelection** A boolean representing if the list allows more than one item to be selected.
**allowsEmptySelection** A boolean representing if the list allows zero items to be selected.
**drawVerticalLines** Boolean representing if vertical lines should be drawn in the list.
**drawHorizontalLines** Boolean representing if horizontal lines should be drawn in the list.
**drawFocusRing** Boolean representing if the standard focus ring should be drawn when the list is selected.
**rowHeight** The height of the rows in the list.
**autohidesScrollers** Boolean representing if scrollbars should automatically be hidden if possible.
**selfDropSettings** A dictionary defining the drop settings when the source of the drop
is this list. The dictionary form is described below.
**selfWindowDropSettings** A dictionary defining the drop settings when the source of the drop
is contained in the same window as this list. The dictionary form is described below.
**selfDocumentDropSettings** A dictionary defining the drop settings when the source of the drop
is contained in the same document as this list. The dictionary form is described below.
**selfApplicationDropSettings** A dictionary defining the drop settings when the source of the drop
is contained in the same application as this list. The dictionary form is described below.
**otherApplicationDropSettings** A dictionary defining the drop settings when the source of the drop
is contained in an application other than the one that contains this list. The dictionary form is described below.
The drop settings dictionaries should be of this form:
+-----------------------------------+--------------------------------------------------------------------+
| *type* | A single drop type indicating what drop types the list accepts. |
| | For example, NSFilenamesPboardType or "MyCustomPboardType". |
+-----------------------------------+--------------------------------------------------------------------+
| *operation* (optional)            | A `drag operation <http://tinyurl.com/NSDraggingIn>`_ that        |
| | the list accepts. The default is *NSDragOperationCopy*. |
+-----------------------------------+--------------------------------------------------------------------+
| *allowsDropBetweenRows* (optional)| A boolean indicating if the list accepts drops between rows.      |
| | The default is True. |
+-----------------------------------+--------------------------------------------------------------------+
| *allowsDropOnRows* (optional)     | A boolean indicating if the list accepts drops on rows.           |
| | The default is False. |
+-----------------------------------+--------------------------------------------------------------------+
| *callback* | Callback to be called when a drop is proposed and when a drop |
| | is to occur. This method should return a boolean representing |
| | if the drop is acceptable or not. This method must accept *sender* |
|                                   | and *dropInfo* arguments. The *dropInfo* will be a dictionary as  |
| | described below. |
+-----------------------------------+--------------------------------------------------------------------+
The dropInfo dictionary passed to drop callbacks will be of this form:
+--------------+--------------------------------------------------------------------------------------------+
| *data*      | The data proposed for the drop. It will be of the type given in the drop settings' *type*.  |
+--------------+--------------------------------------------------------------------------------------------+
| *rowIndex* | The row where the drop is proposed. |
+--------------+--------------------------------------------------------------------------------------------+
| *source* | The source from which items are being dragged. If this object is wrapped by Vanilla, the |
| | Vanilla object will be passed as the source. |
+--------------+--------------------------------------------------------------------------------------------+
| *dropOnRow* | A boolean representing if the row is being dropped on. If this is False, the drop should |
| | occur between rows. |
+--------------+--------------------------------------------------------------------------------------------+
| *isProposal* | A boolean representing if this call is simply proposing the drop or if it is time to |
| | accept the drop. |
+--------------+--------------------------------------------------------------------------------------------+
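A minimal sketch of a drop settings dictionary (the pasteboard type and
the callback are illustrative, not part of the API)::

    def dropCallback(sender, dropInfo):
        if dropInfo["isProposal"]:
            # validate the proposed drop here
            return True
        # accept the drop and process dropInfo["data"]
        return True

    selfDropSettings = dict(
        type="MyCustomPboardType",
        allowsDropBetweenRows=True,
        callback=dropCallback)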
"""
nsScrollViewClass = NSScrollView
nsTableViewClass = VanillaTableViewSubclass
nsArrayControllerClass = VanillaArrayController
nsArrayControllerObserverClass = VanillaArrayControllerObserver
def __init__(self, posSize, items, dataSource=None, columnDescriptions=None, showColumnTitles=True,
selectionCallback=None, doubleClickCallback=None, editCallback=None,
enableDelete=False, enableTypingSensitivity=False,
allowsMultipleSelection=True, allowsEmptySelection=True,
drawVerticalLines=False, drawHorizontalLines=False,
autohidesScrollers=True, drawFocusRing=True, rowHeight=17.0,
selfDropSettings=None,
selfWindowDropSettings=None,
selfDocumentDropSettings=None,
selfApplicationDropSettings=None,
otherApplicationDropSettings=None,
dragSettings=None):
if items is not None and dataSource is not None:
raise VanillaError("can't pass both items and dataSource arguments")
self._posSize = posSize
self._enableDelete = enableDelete
self._nsObject = getNSSubclass(self.nsScrollViewClass)(self)
self._nsObject.setAutohidesScrollers_(autohidesScrollers)
self._nsObject.setHasHorizontalScroller_(True)
self._nsObject.setHasVerticalScroller_(True)
self._nsObject.setBorderType_(NSBezelBorder)
self._nsObject.setDrawsBackground_(True)
self._setAutosizingFromPosSize(posSize)
# add a table view to the scroll view
self._tableView = getNSSubclass(self.nsTableViewClass)(self)
self._nsObject.setDocumentView_(self._tableView)
# set up an observer that will be called by the bindings when a cell is edited
self._editCallback = editCallback
self._editObserver = self.nsArrayControllerObserverClass.alloc().init()
if editCallback is not None:
self._editObserver._targetMethod = self._edit # circular reference to be killed in _breakCycles
if items is not None:
# wrap all the items
items = [self._wrapItem(item) for item in items]
items = NSMutableArray.arrayWithArray_(items)
# set up an array controller
self._arrayController = self.nsArrayControllerClass.alloc().initWithContent_(items)
self._arrayController.setSelectsInsertedObjects_(False)
self._arrayController.setAvoidsEmptySelection_(not allowsEmptySelection)
self._tableView.setDataSource_(self._arrayController)
else:
self._tableView.setDataSource_(dataSource)
self._arrayController = None
# hide the header
if not showColumnTitles or not columnDescriptions:
self._tableView.setHeaderView_(None)
self._tableView.setCornerView_(None)
# set the table attributes
self._tableView.setUsesAlternatingRowBackgroundColors_(True)
if not drawFocusRing:
self._tableView.setFocusRingType_(NSFocusRingTypeNone)
self._tableView.setRowHeight_(rowHeight)
self._tableView.setAllowsEmptySelection_(allowsEmptySelection)
self._tableView.setAllowsMultipleSelection_(allowsMultipleSelection)
if drawVerticalLines or drawHorizontalLines:
if drawVerticalLines and drawHorizontalLines:
lineType = NSTableViewSolidVerticalGridLineMask | NSTableViewSolidHorizontalGridLineMask
elif drawVerticalLines:
lineType = NSTableViewSolidVerticalGridLineMask
else:
lineType = NSTableViewSolidHorizontalGridLineMask
self._tableView.setGridStyleMask_(lineType)
# set up the columns. also make a flag that will be used
# when unwrapping items.
self._orderedColumnIdentifiers = []
self._typingSensitiveColumn = 0
if not columnDescriptions:
self._makeColumnWithoutColumnDescriptions()
self._itemsWereDict = False
else:
self._makeColumnsWithColumnDescriptions(columnDescriptions)
self._itemsWereDict = True
# set some typing sensitivity data
self._typingSensitive = enableTypingSensitivity
if enableTypingSensitivity:
self._lastInputTime = None
self._typingInput = []
# set up an observer that will be called by the bindings when the selection changes.
# this needs to be done after the items have been added to the table. otherwise,
# the selection method will be called when the items are added to the table view.
if selectionCallback is not None:
self._selectionCallback = selectionCallback
self._selectionObserver = self.nsArrayControllerObserverClass.alloc().init()
self._arrayController.addObserver_forKeyPath_options_context_(self._selectionObserver, "selectionIndexes", NSKeyValueObservingOptionNew, 0)
self._selectionObserver._targetMethod = self._selection # circular reference to be killed in _breakCycles
# set the double click callback the standard way
if doubleClickCallback is not None:
self._doubleClickTarget = VanillaCallbackWrapper(doubleClickCallback)
self._tableView.setTarget_(self._doubleClickTarget)
self._tableView.setDoubleAction_("action:")
# set the drop data
self._selfDropSettings = selfDropSettings
self._selfWindowDropSettings = selfWindowDropSettings
self._selfDocumentDropSettings = selfDocumentDropSettings
self._selfApplicationDropSettings = selfApplicationDropSettings
self._otherApplicationDropSettings = otherApplicationDropSettings
allDropTypes = []
for settings in (selfDropSettings, selfWindowDropSettings, selfDocumentDropSettings, selfApplicationDropSettings, otherApplicationDropSettings):
if settings is None:
continue
dropType = settings["type"]
allDropTypes.append(dropType)
self._tableView.registerForDraggedTypes_(allDropTypes)
# set the default drop operation masks
notLocal = NSDragOperationNone
if otherApplicationDropSettings is not None:
notLocal = otherApplicationDropSettings.get("operation", NSDragOperationCopy)
self._tableView.setDraggingSourceOperationMask_forLocal_(notLocal, False)
local = NSDragOperationNone
for settings in (selfDropSettings, selfDocumentDropSettings, selfApplicationDropSettings):
if settings is None:
continue
local = settings.get("operation", NSDragOperationCopy)
self._tableView.setDraggingSourceOperationMask_forLocal_(local, True)
# set the drag data
self._dragSettings = dragSettings
def _testForDeprecatedAttributes(self):
super(List, self)._testForDeprecatedAttributes()
from warnings import warn
if hasattr(self, "_scrollViewClass"):
warn(DeprecationWarning("The _scrollViewClass attribute is deprecated. Use the nsScrollViewClass attribute."))
self.nsScrollViewClass = self._scrollViewClass
if hasattr(self, "_tableViewClass"):
warn(DeprecationWarning("The _tableViewClass attribute is deprecated. Use the nsTableViewClass attribute."))
self.nsTableViewClass = self._tableViewClass
if hasattr(self, "_arrayControllerClass"):
warn(DeprecationWarning("The _arrayControllerClass attribute is deprecated. Use the nsArrayControllerClass attribute."))
self.nsArrayControllerClass = self._arrayControllerClass
if hasattr(self, "_arrayControllerObserverClass"):
warn(DeprecationWarning("The _arrayControllerObserverClass attribute is deprecated. Use the nsArrayControllerObserverClass attribute."))
self.nsArrayControllerObserverClass = self._arrayControllerObserverClass
def getNSScrollView(self):
"""
Return the *NSScrollView* that this object wraps.
"""
return self._nsObject
def getNSTableView(self):
"""
Return the *NSTableView* that this object wraps.
"""
return self._tableView
def _breakCycles(self):
super(List, self)._breakCycles()
if hasattr(self, "_editCallback") and self._editObserver is not None:
self._editObserver._targetMethod = None
if hasattr(self, "_selectionCallback") and self._selectionCallback is not None:
self._selectionObserver._targetMethod = None
if hasattr(self, "_doubleClickTarget") and self._doubleClickTarget is not None:
self._doubleClickTarget.callback = None
self._selfDropSettings = None
self._selfWindowDropSettings = None
self._selfDocumentDropSettings = None
self._selfApplicationDropSettings = None
self._otherApplicationDropSettings = None
def _handleColumnWidths(self, columnDescriptions):
# if the width is set in one of the columns,
# it must be set in all columns if the OS < 10.4.
# raise an error if the width is not defined in all.
if not _haveResizingMasks:
columnDataWithWidths = [column for column in columnDescriptions if column.get("width") is not None]
if columnDataWithWidths and not len(columnDataWithWidths) == len(columnDescriptions):
raise VanillaError("The width of all columns must be set in this version of the operating system")
# we also use this opportunity to determine if
# autoresizing should be set for the table.
autoResize = True
for column in columnDescriptions:
if column.get("width") is not None:
autoResize = False
break
if autoResize:
self._setColumnAutoresizing()
def _setColumnAutoresizing(self):
# set the resizing mask in OS > 10.3
if _haveResizingMasks:
self._tableView.setColumnAutoresizingStyle_(NSTableViewUniformColumnAutoresizingStyle)
# use the method in OS < 10.4
else:
self._tableView.setAutoresizesAllColumnsToFit_(True)
def _makeColumnWithoutColumnDescriptions(self):
self._setColumnAutoresizing()
column = NSTableColumn.alloc().initWithIdentifier_("item")
self._orderedColumnIdentifiers.append("item")
# set the data cell
column.dataCell().setDrawsBackground_(False)
if self._arrayController is not None:
# assign the key to the binding
keyPath = "arrangedObjects.item"
column.bind_toObject_withKeyPath_options_("value", self._arrayController, keyPath, None)
# set the column as editable if we have a callback
if self._editCallback is not None:
self._arrayController.addObserver_forKeyPath_options_context_(self._editObserver, keyPath, NSKeyValueObservingOptionNew, 0)
else:
column.setEditable_(False)
# finally, add the column to the table view
self._tableView.addTableColumn_(column)
def _makeColumnsWithColumnDescriptions(self, columnDescriptions):
# make sure that the column widths are in the correct format.
self._handleColumnWidths(columnDescriptions)
# create each column.
for columnIndex, data in enumerate(columnDescriptions):
title = data["title"]
key = data.get("key", title)
width = data.get("width")
formatter = data.get("formatter")
cell = data.get("cell")
editable = data.get("editable")
binding = data.get("binding", "value")
keyPath = "arrangedObjects.%s" % key
# check for typing sensitivity.
if data.get("typingSensitive"):
self._typingSensitiveColumn = columnIndex
# instantiate the column.
column = NSTableColumn.alloc().initWithIdentifier_(key)
self._orderedColumnIdentifiers.append(key)
# set the width
if width is not None:
# set the resizing mask in OS > 10.3
if _haveResizingMasks:
mask = NSTableColumnAutoresizingMask
column.setResizingMask_(mask)
# use the method in OS < 10.4
else:
column.setResizable_(True)
else:
# set the resizing mask in OS > 10.3
if _haveResizingMasks:
mask = NSTableColumnUserResizingMask | NSTableColumnAutoresizingMask
column.setResizingMask_(mask)
# use the method in OS < 10.4
else:
column.setResizable_(True)
# set the header cell
column.headerCell().setTitle_(title)
# set the data cell
if cell is None:
cell = column.dataCell()
cell.setDrawsBackground_(False)
cell.setStringValue_("") # cells have weird default values
else:
column.setDataCell_(cell)
# assign the formatter
if formatter is not None:
cell.setFormatter_(formatter)
if self._arrayController is not None:
# assign the key to the binding
column.bind_toObject_withKeyPath_options_(binding, self._arrayController, keyPath, None)
# set the editability of the column.
# if no value was defined in the column data,
# base the editability on the presence of
# an edit callback.
if editable is None and self._editCallback is None:
editable = False
elif editable is None and self._editCallback is not None:
editable = True
if editable:
if self._arrayController is not None:
self._arrayController.addObserver_forKeyPath_options_context_(self._editObserver, keyPath, NSKeyValueObservingOptionNew, 0)
else:
column.setEditable_(False)
# finally, add the column to the table view
self._tableView.addTableColumn_(column)
if width is not None:
# do this *after* adding the column to the table, or the first column
# will have the wrong width (at least on 10.3)
column.setWidth_(width)
def _wrapItem(self, item):
# if the item is an instance of NSObject, assume that
# it is KVC compliant and return it.
if isinstance(item, NSObject):
return item
# this is where we ensure key-value coding compliance.
# in order to do this, each item must be a NSDictionary
# or, in the case of editable Lists, NSMutableDictionary.
if self._editCallback is None:
dictClass = NSDictionary
else:
dictClass = NSMutableDictionary
# if the item is already in the proper class, pass.
if isinstance(item, dictClass):
pass
# convert a dictionary to the proper dictionary class.
elif isinstance(item, dict) or isinstance(item, NSDictionary):
item = NSMutableDictionary.dictionaryWithDictionary_(item)
# the item is not a dictionary, so wrap it inside of a dictionary.
else:
item = NSMutableDictionary.dictionaryWithDictionary_({"item": item})
return item
def _edit(self):
if self._editCallback is not None:
self._editCallback(self)
def _selection(self):
if self._selectionCallback is not None:
self._selectionCallback(self)
def _keyDown(self, event):
# this method is called by the NSTableView subclass after a key down
# has occurred. the subclass expects that a boolean will be returned
# that indicates if this method has done something (delete an item or
# select an item). if False is returned, the delegate calls the super
method to ensure standard key down behavior.
#
# get the characters
characters = event.characters()
# get the field editor
fieldEditor = self._tableView.window().fieldEditor_forObject_(True, self._tableView)
#
deleteCharacters = [
NSBackspaceCharacter,
NSDeleteFunctionKey,
NSDeleteCharacter,
unichr(0x007F),
]
nonCharacters = [
NSUpArrowFunctionKey,
NSDownArrowFunctionKey,
NSLeftArrowFunctionKey,
NSRightArrowFunctionKey,
NSPageUpFunctionKey,
NSPageDownFunctionKey,
unichr(0x0003),
u"\r",
u"\t",
]
if characters in deleteCharacters:
if self._enableDelete:
self._removeSelection()
return True
# arrow key. reset the typing entry if necessary.
elif characters in nonCharacters:
if self._typingSensitive:
self._lastInputTime = None
fieldEditor.setString_(u"")
return False
elif self._typingSensitive:
# get the current time
rightNow = time.time()
# no time defined. define it.
if self._lastInputTime is None:
self._lastInputTime = rightNow
# if the last input was too long ago,
# clear away the old input
if rightNow - self._lastInputTime > 0.75:
fieldEditor.setString_(u"")
# reset the clock
self._lastInputTime = rightNow
# add the characters to the field editor
fieldEditor.interpretKeyEvents_([event])
# get the input string
inputString = fieldEditor.string()
# if the list has multiple columns, use the items in the typing sensitive column
tableColumns = self._tableView.tableColumns()
columnID = tableColumns[self._typingSensitiveColumn].identifier()
#
match = None
matchIndex = None
lastResort = None
lastResortIndex = None
inputLength = len(inputString)
for index in xrange(len(self)):
item = self._arrayController.content()[index]
# the item could be a dictionary or
# a NSObject. safely handle each.
if isinstance(item, NSDictionary):
item = item[columnID]
else:
item = getattr(item, columnID)()
# only test strings
if not isinstance(item, basestring):
continue
# if the item starts with the input string, it is considered a match
if item.startswith(inputString):
if match is None:
match = item
matchIndex = index
continue
# only if the item is less than the previous match is it a more relevant match
# example:
# given this order: sys, signal
# and this input string: s
# sys will be the first match, but signal is the more accurate match
if item < match:
match = item
matchIndex = index
continue
# if the item is greater than the input string, it can be used as a last resort
# example:
# given this order: vanilla, zipimport
# and this input string: x
# zipimport will be used as the last resort
if item > inputString:
if lastResort is None:
lastResort = item
lastResortIndex = index
continue
# if the existing last resort is greater than the item,
# the item is a closer match to the input string
if lastResort > item:
lastResort = item
lastResortIndex = index
continue
if matchIndex is not None:
self.setSelection([matchIndex])
return True
elif lastResortIndex is not None:
self.setSelection([lastResortIndex])
return True
return False
# -------------
# list behavior
# -------------
def __len__(self):
return len(self._arrayController.content())
def __getitem__(self, index):
item = self._arrayController.content()[index]
if not self._itemsWereDict:
item = item["item"]
return item
def __setitem__(self, index, value):
# rather than inserting a new item, replace the
# content of the existing item at the index.
# this will call the editCallback if assigned
# so temporarily suspend it.
editCallback = self._editCallback
self._editCallback = None
item = self._arrayController.content()[index]
if self._itemsWereDict:
for key, value in value.items():
item[key] = value
else:
item["item"] = value
self._editCallback = editCallback
def __delitem__(self, index):
index = self._getSortedIndexesFromUnsortedIndexes([index])[0]
self._arrayController.removeObjectAtArrangedObjectIndex_(index)
def __contains__(self, item):
item = self._wrapItem(item)
return self._arrayController.content().containsObject_(item)
def append(self, item):
item = self._wrapItem(item)
self._arrayController.addObject_(item)
def remove(self, item):
index = self.index(item)
del self[index]
def index(self, item):
item = self._wrapItem(item)
return self._arrayController.content().index(item)
def insert(self, index, item):
item = self._wrapItem(item)
if index < len(self._arrayController.content()):
index = self._getSortedIndexesFromUnsortedIndexes([index])[0]
self._arrayController.insertObject_atArrangedObjectIndex_(item, index)
def extend(self, items):
items = [self._wrapItem(item) for item in items]
self._arrayController.addObjects_(items)
# ----------------
# vanilla behavior
# ----------------
def enable(self, onOff):
"""
Enable or disable the object. **onOff** should be a boolean.
"""
self._tableView.setEnabled_(onOff)
def set(self, items):
"""
Set the items in the list.
**items** should follow the same format as described in the constructor.
"""
items = [self._wrapItem(item) for item in items]
items = NSMutableArray.arrayWithArray_(items)
self._arrayController.setContent_(items)
def get(self):
"""
Get the list of items in the list.
"""
items = list(self._arrayController.content())
if not self._itemsWereDict:
items = [item["item"] for item in items]
return items
def _iterIndexSet(self, s):
i = s.firstIndex()
while i != NSNotFound:
yield i
i = s.indexGreaterThanIndex_(i)
def getEditedColumnAndRow(self):
# get the column and unsort
columnIndex = self._tableView.editedColumn()
if columnIndex != -1:
column = self._tableView.tableColumns()[columnIndex]
identifier = column.identifier()
columnIndex = self._orderedColumnIdentifiers.index(identifier)
# get the row and unsort
rowIndex = self._tableView.editedRow()
if rowIndex != -1:
rowIndex = self._getUnsortedIndexesFromSortedIndexes([rowIndex])[0]
return columnIndex, rowIndex
def getSelection(self):
"""
Get a list of indexes of selected items in the list.
"""
selectedRowIndexes = self._tableView.selectedRowIndexes()
# if nothing is selected return an empty list
if not selectedRowIndexes:
return []
# create a list containing only the selected indexes.
selectedRowIndexes = list(self._iterIndexSet(selectedRowIndexes))
return self._getUnsortedIndexesFromSortedIndexes(selectedRowIndexes)
def setSelection(self, selection):
"""
Set the selected items in the list.
**selection** should be a list of indexes.
"""
indexes = self._getSortedIndexesFromUnsortedIndexes(selection)
indexSet = NSMutableIndexSet.indexSet()
for index in indexes:
indexSet.addIndex_(index)
self._arrayController.setSelectionIndexes_(indexSet)
self.scrollToSelection()
def _removeSelection(self):
selection = self.getSelection()
selection = self._getSortedIndexesFromUnsortedIndexes(selection)
indexSet = NSMutableIndexSet.indexSet()
for index in selection:
indexSet.addIndex_(index)
self._arrayController.removeObjectsAtArrangedObjectIndexes_(indexSet)
def scrollToSelection(self):
"""Scroll the selected rows to visible."""
selection = self.getSelection()
if not selection:
return
indexes = self._getSortedIndexesFromUnsortedIndexes(selection)
index = min(indexes)
self._tableView.scrollRowToVisible_(index)
# methods for handling sorted/unsorted index conversion
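# for example, with unsorted content ["b", "a"] displayed in ascending
# order, sorted row index 0 ("a") corresponds to unsorted index 1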
def _getUnsortedIndexesFromSortedIndexes(self, indexes):
arrayController = self._arrayController
sortDescriptors = arrayController.sortDescriptors()
# no sorting has been done. therefore, no unsorting
# needs to be done.
if not sortDescriptors:
return indexes
unsortedArray = arrayController.content()
sortedArray = unsortedArray.sortedArrayUsingDescriptors_(sortDescriptors)
# create a dict of (address, obj) for the sorted
# objects at the requested indexes.
sortedObjects = [(id(sortedArray[index]), sortedArray[index]) for index in indexes]
sortedObjects = dict.fromkeys(sortedObjects)
# find the indexes of the unsorted objects matching
# the sorted objects
unsortedIndexes = []
for index in xrange(len(unsortedArray)):
obj = unsortedArray[index]
test = (id(obj), obj)
if test in sortedObjects:
unsortedIndexes.append(index)
del sortedObjects[test]
if not sortedObjects:
break
return unsortedIndexes
def _getSortedIndexesFromUnsortedIndexes(self, indexes):
arrayController = self._arrayController
sortDescriptors = arrayController.sortDescriptors()
# no sorting has been done. therefore, no unsorting
# needs to be done.
if not sortDescriptors:
return indexes
unsortedArray = arrayController.content()
sortedArray = unsortedArray.sortedArrayUsingDescriptors_(sortDescriptors)
# create a dict of (address, obj) for the unsorted
# objects at the requested indexes.
unsortedObjects = [(id(unsortedArray[index]), unsortedArray[index]) for index in indexes]
unsortedObjects = dict.fromkeys(unsortedObjects)
# find the indexes of the sorted objects matching
# the unsorted objects
sortedIndexes = []
for index in xrange(len(sortedArray)):
obj = sortedArray[index]
test = (id(obj), obj)
if test in unsortedObjects:
sortedIndexes.append(index)
del unsortedObjects[test]
if not unsortedObjects:
break
return sortedIndexes
def CheckBoxListCell(title=None):
"""
An object that displays a check box in a List column.
**This object should only be used in the *columnDescriptions*
argument during the construction of a List.**
**title** The title to be set in *all* items in the List column.
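A minimal usage sketch (the column titles and item values are illustrative)::

    myList = List((0, 0, -0, -0),
        [{"name": "A", "done": True}, {"name": "B", "done": False}],
        columnDescriptions=[
            {"title": "name"},
            {"title": "done", "cell": CheckBoxListCell()}])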
"""
cell = NSButtonCell.alloc().init()
cell.setButtonType_(NSSwitchButton)
cell.setControlSize_(NSSmallControlSize)
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(NSSmallControlSize))
cell.setFont_(font)
if title is None:
title = ""
cell.setTitle_(title)
return cell
def SliderListCell(minValue=0, maxValue=100):
"""
An object that displays a slider in a List column.
**This object should only be used in the *columnDescriptions*
argument during the construction of a List.**
**minValue** The minimum value for the slider.
**maxValue** The maximum value for the slider.
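A minimal usage sketch (the column titles and item values are illustrative)::

    myList = List((0, 0, -0, -0),
        [{"name": "A", "value": 20}, {"name": "B", "value": 80}],
        columnDescriptions=[
            {"title": "name"},
            {"title": "value", "cell": SliderListCell(minValue=0, maxValue=100)}])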
"""
cell = NSSliderCell.alloc().init()
cell.setControlSize_(NSSmallControlSize)
cell.setMinValue_(minValue)
cell.setMaxValue_(maxValue)
return cell
def PopUpButtonListCell(items):
"""
An object that displays a pop up list in a List column.
**This object should only be used in the *columnDescriptions*
argument during the construction of a List.**
**items** The items that should appear in the pop up list.
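A minimal usage sketch (the titles and items are illustrative; setting the
column's *binding* to "selectedValue" is an assumption made here so the
cell tracks the chosen item)::

    myList = List((0, 0, -0, -0),
        [{"name": "A", "flavor": "vanilla"}],
        columnDescriptions=[
            {"title": "name"},
            {"title": "flavor",
             "cell": PopUpButtonListCell(["vanilla", "chocolate"]),
             "binding": "selectedValue"}])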
"""
cell = NSPopUpButtonCell.alloc().init()
cell.setBordered_(False)
# add the basic items
titles = []
for title in items:
if isinstance(title, (NSString, NSAttributedString)):
title = title.string()
titles.append(title)
cell.addItemsWithTitles_(titles)
# add attributed titles
for index, title in enumerate(items):
if not isinstance(title, NSAttributedString):
continue
item = cell.itemAtIndex_(index)
item.setAttributedTitle_(title)
return cell
"""Base classes and helpers for unit tests."""
try:
from unittest import mock
except ImportError:
import mock
import github3
import json
import os.path
import pytest
import unittest
def create_url_helper(base_url):
"""A function to generate ``url_for`` helpers."""
base_url = base_url.rstrip('/')
def url_for(path=''):
if path:
path = '/' + path.strip('/')
return base_url + path
return url_for
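# A minimal sketch of the intended usage (the URL is illustrative):
#
#   url_for = create_url_helper('https://api.github.com/repos/owner/repo')
#   url_for()          # 'https://api.github.com/repos/owner/repo'
#   url_for('issues')  # 'https://api.github.com/repos/owner/repo/issues'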
def create_example_data_helper(example_filename):
"""A function to generate example data helpers."""
directory = os.path.dirname(__file__)
directory = os.path.join(directory, "json")
example = os.path.join(directory, example_filename)
def data_helper():
with open(example) as fd:
data = json.load(fd)
return data
return data_helper
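# A minimal sketch of the intended usage (the file name is illustrative and
# is assumed to live in the tests' json/ directory):
#
#   get_repo_example_data = create_example_data_helper('repo_example.json')
#   repo = get_repo_example_data()  # the parsed JSON as a Python object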
def build_url(self, *args, **kwargs):
"""A function to proxy to the actual GitHubSession#build_url method."""
# We want to assert what is happening with the actual calls to the
# Internet. We can proxy this.
return github3.session.GitHubSession().build_url(*args, **kwargs)
class UnitHelper(unittest.TestCase):
"""Base class for unittests."""
# Sub-classes must assign the class to this during definition
described_class = None
# Sub-classes must also assign a dictionary to this during definition
example_data = {}
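# A hypothetical subclass definition (the described class and the example
# data are illustrative, not taken from the real test suite):
#
#   class TestRepository(UnitHelper):
#       described_class = github3.repos.Repository
#       example_data = {'full_name': 'owner/repo'}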
def create_mocked_session(self):
"""Use mock to auto-spec a GitHubSession and return an instance."""
MockedSession = mock.create_autospec(github3.session.GitHubSession)
return MockedSession()
def create_session_mock(self, *args):
"""Create a mocked session and add headers and auth attributes."""
session = self.create_mocked_session()
base_attrs = ['headers', 'auth']
attrs = dict(
(key, mock.Mock()) for key in set(args).union(base_attrs)
)
session.configure_mock(**attrs)
session.delete.return_value = None
session.get.return_value = None
session.patch.return_value = None
session.post.return_value = None
session.put.return_value = None
session.has_auth.return_value = True
return session
def create_instance_of_described_class(self):
"""
Use cls.example_data to create an instance of the described class.
If cls.example_data is None, just create a simple instance of the
class.
"""
if self.example_data and self.session:
instance = self.described_class(self.example_data,
self.session)
elif self.example_data and not self.session:
instance = self.described_class(self.example_data)
else:
instance = self.described_class()
instance.session = self.session
return instance
def delete_called_with(self, *args, **kwargs):
"""Use to assert delete was called with JSON."""
self.method_called_with('delete', args, kwargs)
def method_called_with(self, method_name, args, kwargs):
"""Assert that a method was called on a session with JSON."""
mock_method = getattr(self.session, method_name)
assert mock_method.called is True
call_args, call_kwargs = mock_method.call_args
# Data passed to assertion
data = kwargs.pop('data', None)
# Data passed to patch
call_data = call_kwargs.pop('data', None)
# Data passed by the call to post positionally
# URL, 'json string'
if call_data is None:
call_args, call_data = call_args[:1], call_args[1]
# If data is a dictionary (or list) and call_data exists
if not isinstance(data, str) and call_data:
call_data = json.loads(call_data)
assert args == call_args
assert data == call_data
assert kwargs == call_kwargs
def patch_called_with(self, *args, **kwargs):
"""Use to assert patch was called with JSON."""
self.method_called_with('patch', args, kwargs)
def post_called_with(self, *args, **kwargs):
"""Use to assert post was called with JSON."""
assert self.session.post.called is True
call_args, call_kwargs = self.session.post.call_args
# Data passed to assertion
data = kwargs.pop('data', None)
# Data passed by the call to post positionally
# URL, 'json string'
call_args, call_data = call_args[:1], call_args[1]
# If data is a dictionary (or list) and call_data exists
if not isinstance(data, str) and call_data:
call_data = json.loads(call_data)
assert args == call_args
assert data == call_data
assert kwargs == call_kwargs
def put_called_with(self, *args, **kwargs):
"""Use to assert put was called with JSON."""
self.method_called_with('put', args, kwargs)
def setUp(self):
"""Use to set up attributes on self before each test."""
self.session = self.create_session_mock()
self.instance = self.create_instance_of_described_class()
# Proxy the build_url method to the class so it can build the URL and
# we can assert things about the call that will be attempted to the
# internet
self.described_class._build_url = build_url
self.after_setup()
def after_setup(self):
"""No-op method to avoid people having to override setUp."""
pass
class UnitIteratorHelper(UnitHelper):
"""Base class for iterator based unit tests."""
def create_session_mock(self, *args):
"""Override UnitHelper's create_session_mock method.
We want all methods to return an instance of the NullObject. This
class has a dummy ``__iter__`` implementation which we want for
methods that iterate over the results of a response.
"""
# Retrieve a mocked session object
session = super(UnitIteratorHelper, self).create_session_mock(*args)
# Initialize a NullObject which has magical properties
null = github3.null.NullObject()
# Set it as the return value for every method
session.delete.return_value = null
session.get.return_value = null
session.patch.return_value = null
session.post.return_value = null
session.put.return_value = null
return session
def get_next(self, iterator):
"""Nicely wrap up a call to the iterator."""
try:
next(iterator)
except StopIteration:
pass
def patch_get_json(self):
"""Patch a GitHubIterator's _get_json method."""
self.get_json_mock = mock.patch.object(
github3.structs.GitHubIterator, '_get_json'
)
self.patched_get_json = self.get_json_mock.start()
self.patched_get_json.return_value = []
def setUp(self):
"""Use UnitHelper's setUp but also patch _get_json."""
super(UnitIteratorHelper, self).setUp()
self.patch_get_json()
def tearDown(self):
"""Stop mocking _get_json."""
super(UnitIteratorHelper, self).tearDown()
self.get_json_mock.stop()
class UnitRequiresAuthenticationHelper(UnitHelper):
"""Helper for unit tests that demonstrate authentication is required."""
def after_setup(self):
"""Disable authentication on the session."""
self.session.auth = None
self.session.has_auth.return_value = False
def assert_requires_auth(self, func, *args, **kwargs):
"""
Assert error is raised if function is called without
authentication.
"""
with pytest.raises(github3.exceptions.AuthenticationFailed):
func(*args, **kwargs)
class UnitGitHubObjectHelper(UnitHelper):
"""Base class for GitHubObject unit tests."""
def setUp(self):
self.session = None
self.instance = self.create_instance_of_described_class()
# Proxy the build_url method to the class so it can build the URL and
# we can assert things about the call that will be attempted to the
# internet
self.described_class._build_url = build_url
self.after_setup()
@pytest.mark.usefixtures('enterprise_url')
class UnitGitHubEnterpriseHelper(UnitHelper):
def build_url(self, *args, **kwargs):
"""A function to proxy to the actual GitHubSession#build_url method."""
# We want to assert what is happening with the actual calls to the
# Internet. We can proxy this.
return github3.session.GitHubSession().build_url(
*args,
base_url=self.enterprise_url,
**kwargs
)
def setUp(self):
self.session = self.create_session_mock()
self.instance = github3.GitHubEnterprise(self.enterprise_url)
self.instance.session = self.session
# Proxy the build_url method to the class so it can build the URL and
# we can assert things about the call that will be attempted to the
# internet
self.instance._build_url = self.build_url
self.after_setup()
# $Id$
#
# Copyright (C) 2007 greg Landrum
#
# @@ All Rights Reserved @@
#
import unittest,subprocess,os
from rdkit import RDConfig
from rdkit.Dbase.DbConnection import DbConnect
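# These tests exercise the CreateDb.py / SearchDb.py command-line scripts by
# spawning them with subprocess; for example, the first database build below
# is equivalent to running:
#   python CreateDb.py --dbDir=testData/bzr --molFormat=smiles testData/bzr.smi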
class TestCase(unittest.TestCase):
def test1Create(self):
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnless(d[0][0]==10)
conn = DbConnect('testData/bzr/AtomPairs.sqlt')
d = conn.GetData('atompairs',fields='count(*)')
self.failUnless(d[0][0]==10)
conn = DbConnect('testData/bzr/Descriptors.sqlt')
d = conn.GetData('descriptors_v1',fields='count(*)')
self.failUnless(d[0][0]==10)
conn = DbConnect('testData/bzr/Fingerprints.sqlt')
d = conn.GetData('rdkitfps',fields='count(*)')
self.failUnless(d[0][0]==10)
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=sdf',
'--doGobbi2D',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnless(d[0][0]==163)
conn = DbConnect('testData/bzr/AtomPairs.sqlt')
d = conn.GetData('atompairs',fields='count(*)')
self.failUnless(d[0][0]==163)
conn = DbConnect('testData/bzr/Descriptors.sqlt')
d = conn.GetData('descriptors_v1',fields='count(*)')
self.failUnless(d[0][0]==163)
conn = DbConnect('testData/bzr/Fingerprints.sqlt')
d = conn.GetData('rdkitfps',fields='count(*)')
self.failUnless(d[0][0]==163)
def test2_1SearchFPs(self):
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr','--molFormat=sdf',
'--topN=5','--outF=testData/bzr/search.out','testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==163)
splitLs=[x.strip().split(',') for x in lines]
for line in splitLs:
lbl = line[0]
i=1
nbrs={}
lastVal=1.0
while i<len(line):
nbrs[line[i]]=line[i+1]
self.failUnless(float(line[i+1])<=lastVal)
lastVal=float(line[i+1])
i+=2
self.failUnless(nbrs.has_key(lbl))
self.failUnless(nbrs[lbl]=='1.000',nbrs[lbl])
os.unlink('testData/bzr/search.out')
def test2_2SearchAtomPairs(self):
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr','--molFormat=sdf',
'--topN=5','--outF=testData/bzr/search.out','--similarityType=AtomPairs',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==163)
splitLs=[x.strip().split(',') for x in lines]
for line in splitLs:
lbl = line[0]
i=1
nbrs={}
lastVal=1.0
while i<len(line):
nbrs[line[i]]=line[i+1]
self.failUnless(float(line[i+1])<=lastVal)
lastVal=float(line[i+1])
i+=2
self.failUnless(nbrs.has_key(lbl))
self.failUnless(nbrs[lbl]=='1.000')
os.unlink('testData/bzr/search.out')
def test2_3SearchTorsions(self):
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr','--molFormat=sdf','--topN=5',
'--outF=testData/bzr/search.out','--similarityType=TopologicalTorsions',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==163)
splitLs=[x.strip().split(',') for x in lines]
for line in splitLs:
lbl = line[0]
i=1
nbrs={}
lastVal=1.0
while i<len(line):
nbrs[line[i]]=line[i+1]
self.failUnless(float(line[i+1])<=lastVal)
lastVal=float(line[i+1])
i+=2
self.failUnless(nbrs.has_key(lbl))
self.failUnless(nbrs[lbl]=='1.000')
os.unlink('testData/bzr/search.out')
def test2_4SearchProps(self):
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--query=activity<6.5'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==30)
os.unlink('testData/bzr/search.out')
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--query=activity<6.5'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==30)
os.unlink('testData/bzr/search.out')
def test2_5SearchSmarts(self):
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--smarts=cncncc',))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),49)
os.unlink('testData/bzr/search.out')
if os.path.exists('/dev/null'):
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=/dev/null',
'--smilesOut=testData/bzr/search.out',
'--smarts=cncncc',))
else:
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/crud.out',
'--smilesOut=testData/bzr/search.out',
'--smarts=cncncc',))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),49)
os.unlink('testData/bzr/search.out')
if os.path.exists('testData/crud.out'):
os.unlink('testData/crud.out')
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--negate','--smarts=cncncc',))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),114)
os.unlink('testData/bzr/search.out')
def test2_6SearchBoth(self):
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--query=activity<6.5','--smarts=cncncc'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),5)
os.unlink('testData/bzr/search.out')
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr',
'--outF=testData/bzr/search.out','--query=activity<6.5',
'--smarts=cncncc','--negate'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),25)
os.unlink('testData/bzr/search.out')
def test2_7SearchGobbi(self):
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failUnless(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr','--molFormat=sdf','--topN=5',
'--outF=testData/bzr/search.out','--similarityType=Gobbi2D',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnless(len(lines)==163)
splitLs=[x.strip().split(',') for x in lines]
for line in splitLs:
lbl = line[0]
i=1
nbrs={}
lastVal=1.0
while i<len(line):
nbrs[line[i]]=line[i+1]
self.failUnless(float(line[i+1])<=lastVal)
lastVal=float(line[i+1])
i+=2
self.failUnless(nbrs.has_key(lbl))
self.failUnless(nbrs[lbl]=='1.000')
self.failUnlessEqual(splitLs[0][0],'Adinazolam')
self.failUnlessEqual(splitLs[0][3],'alpha-hydroxytriazolam')
self.failUnlessEqual(splitLs[0][4],'0.631')
os.unlink('testData/bzr/search.out')
def test4CreateOptions(self):
if os.path.exists('testData/bzr/Compounds.sqlt'):
os.unlink('testData/bzr/Compounds.sqlt')
if os.path.exists('testData/bzr/AtomPairs.sqlt'):
os.unlink('testData/bzr/AtomPairs.sqlt')
if os.path.exists('testData/bzr/Descriptors.sqlt'):
os.unlink('testData/bzr/Descriptors.sqlt')
if os.path.exists('testData/bzr/Fingerprints.sqlt'):
os.unlink('testData/bzr/Fingerprints.sqlt')
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'--noExtras','--noSmiles',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failIf(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failIf(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failIf(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnlessEqual(d[0][0],10)
d = conn.GetData('molecules',fields='*')
self.failUnlessEqual(len(d),10)
cns = [x.lower() for x in d.GetColumnNames()]
self.failIf('smiles' in cns)
conn=None
d=None
if os.path.exists('testData/bzr/Compounds.sqlt'):
os.unlink('testData/bzr/Compounds.sqlt')
if os.path.exists('testData/bzr/AtomPairs.sqlt'):
os.unlink('testData/bzr/AtomPairs.sqlt')
if os.path.exists('testData/bzr/Descriptors.sqlt'):
os.unlink('testData/bzr/Descriptors.sqlt')
if os.path.exists('testData/bzr/Fingerprints.sqlt'):
os.unlink('testData/bzr/Fingerprints.sqlt')
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'--noSmiles','--noFingerprints','--noLayeredFps','--noMorganFps','--noPairs','--noDescriptors',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failIf(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failIf(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failIf(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnless(d[0][0]==10)
d = conn.GetData('molecules',fields='*')
self.failUnless(len(d)==10)
cns = [x.lower() for x in d.GetColumnNames()]
self.failIf('smiles' in cns)
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'--noProps','--noFingerprints','--noLayeredFps','--noMorganFps','--noPairs','--noDescriptors',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failIf(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failIf(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failIf(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnlessEqual(d[0][0],10)
d = conn.GetData('molecules',fields='*')
self.failUnlessEqual(len(d),10)
cns = [x.lower() for x in d.GetColumnNames()]
self.failUnless('smiles' in cns)
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'--noFingerprints','--noLayeredFps','--noMorganFps','--noPairs','--noDescriptors',
'--maxRowsCached=4',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failIf(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failIf(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failIf(os.path.exists('testData/bzr/Fingerprints.sqlt'))
conn = DbConnect('testData/bzr/Compounds.sqlt')
d = conn.GetData('molecules',fields='count(*)')
self.failUnlessEqual(d[0][0],10)
d = conn.GetData('molecules',fields='*')
self.failUnlessEqual(len(d),10)
cns = [x.lower() for x in d.GetColumnNames()]
self.failUnless('smiles' in cns)
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr','--molFormat=smiles',
'--noFingerprints',
'--noPairs','--noDescriptors',
'--maxRowsCached=4',
'testData/bzr.smi'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/Compounds.sqlt'))
self.failIf(os.path.exists('testData/bzr/AtomPairs.sqlt'))
self.failIf(os.path.exists('testData/bzr/Descriptors.sqlt'))
self.failUnless(os.path.exists('testData/bzr/Fingerprints.sqlt'))
def test5TestBackwardsCompat(self):
if os.path.exists('testData/bzr/Compounds.sqlt'):
os.unlink('testData/bzr/Compounds.sqlt')
if os.path.exists('testData/bzr/AtomPairs.sqlt'):
os.unlink('testData/bzr/AtomPairs.sqlt')
if os.path.exists('testData/bzr/Descriptors.sqlt'):
os.unlink('testData/bzr/Descriptors.sqlt')
if os.path.exists('testData/bzr/Fingerprints.sqlt'):
os.unlink('testData/bzr/Fingerprints.sqlt')
p = subprocess.Popen(('python', 'CreateDb.py','--dbDir=testData/bzr',
'--noFingerprints','--noDescriptors',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
conn = DbConnect('testData/bzr/AtomPairs.sqlt')
curs = conn.GetCursor()
curs.execute('create table tmp as select compound_id,atompairfp,torsionfp from atompairs')
p = subprocess.Popen(('python', 'SearchDb.py','--dbDir=testData/bzr','--molFormat=sdf',
'--topN=5','--outF=testData/bzr/search.out','--similarityType=AtomPairs',
'--pairTableName=tmp',
'testData/bzr.sdf'))
res=p.wait()
self.failIf(res)
p=None
self.failUnless(os.path.exists('testData/bzr/search.out'))
inF = file('testData/bzr/search.out','r')
lines=inF.readlines()
inF=None
self.failUnlessEqual(len(lines),163)
splitLs=[x.strip().split(',') for x in lines]
for line in splitLs:
lbl = line[0]
i=1
nbrs={}
lastVal=1.0
while i<len(line):
nbrs[line[i]]=line[i+1]
self.failUnless(float(line[i+1])<=lastVal)
lastVal=float(line[i+1])
i+=2
self.failUnless(nbrs.has_key(lbl))
self.failUnless(nbrs[lbl]=='1.000')
os.unlink('testData/bzr/search.out')
if __name__ == '__main__':
unittest.main()
| |
using PyCall,DataFrames,RCall
unshift!(PyVector(pyimport("sys")["path"]), "")
@pyimport ncdata
len = length
filename = "./reaction_classify/cri2_nhept.nc"
data = ncdata.get(filename)
specs = names!(DataFrame(data["spec"]),[Symbol(i)for i in data["sc"]])
rates = names!(DataFrame(data["rate"]),[Symbol(i)for i in data["rc"]])
rates = rates[:,[Symbol(i) for i in filter(x->!ismatch(r"EMISS|DUMMY",x),data["rc"])]]
specs = specs[:,[Symbol(i) for i in filter(x->!ismatch(r"EMISS|DUMMY",x),data["sc"])]]
rates = rates[7:len(rates)]
rates = rates[:,[Symbol(i) for i in filter(x->sum(rates[x])>0,names(rates))]]
joint = "\n"*(join(names(rates),"\n"))*"\n"
products= [split(i[4:len(i)],"+") for i in matchall(r"-->([A-z0-9+]*)",joint)]
reactants = [split(i[2:len(i)-1],"+") for i in matchall(r"\n([A-z0-9+]{1,60})([-->]{0,1})",joint)]
node_set = Set([ i[2:len(i)-1] for i in matchall(r"[\n+>-]{1}[A-z0-9]+[\n+>-]{1}",joint) ])
nodes = [i for i in node_set]
dict = Dict( [[i,x] for (x,i) in enumerate(nodes)])
flux = []
edges = []
for i in 1:length(reactants)
rcol=[]
for j in reactants[i]
coeff = match( r"([\d\s\.]*)(\D[\d\D]*)", j)
# column lookup doubles as an early check that the species exists
dummy = specs[Symbol(coeff[2])]
# stoichiometric coefficient, defaulting to 1 when none is written
try
push!(rcol,parse(Float32,coeff[1]))
catch
push!(rcol,1.0f0)
end
# make edges array: one edge per (reactant, product) pair,
# tagged with the reaction index i
for l in products[i]
push!(edges,[coeff[2],match( r"([\d\s\.]*)(\D[\d\D]*)", l)[2],i])
end
end
# push the flux once per reaction, so flux[i] lines up with the
# reaction index stored in each edge
prod = 1
for k in rcol
prod*=k
end
push!(flux, prod*rates[i])
end
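# A sketch of the resulting data layout (inferred from the indexing used
# below): flux[i] is the coefficient-scaled rate time series of reaction i,
# and each entry of edges is [reactant, product, i], so a later lookup of
# newflux[edge[3]] recovers the weight of the reaction that produced an edge.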
normalise(x) = x/norm(x)
function sorteqn(x,delim)
r,p = split(string(x),delim)
r = join(sort(split(r,'+')),'+')
p = join(sort(split(p,'+')),'+')
return join([r,p],delim)
end
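# e.g. sorteqn("C+B-->D+A", "-->") == "B+C-->A+D"; sorting each side gives
# every reaction a canonical name regardless of how its terms were ordered.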
rateeqn = [Symbol(sorteqn(i,"-->")) for i in names(rates)]
t = 144
links = filter(i -> flux[i][t]>0 , 1:len(flux))
tflux = [log10(flux[i][t]) for i in links]
weight = tflux - minimum(tflux)
weight = 1 - weight/maximum(weight)
#1+normalise(tflux)
newflux = [flux[i][t] for i in 1:len(flux)]
for (k, i) in enumerate(links)
newflux[i] = weight[k]
end
edge = filter(i -> newflux[i[3]]>0 , edges)
source = [i[1] for i in edge]
target=[i[2] for i in edge]
weighted = [newflux[i[3]]+0.0001 for i in edge]
novalues = [i>0. for i in Array(specs[t,:])]
novalues = Set([string(i) for i in names(specs)[novalues]])
@rput source
@rput target
@rput weighted
R"library(igraph)"
R"el <- structure(list(V1 = source, V2 = target, weight = weighted), .Names = c('V1',
'V2', 'weight'), class = 'data.frame', row.names = c(1:$(length(edge))
))"
R"g <- graph.data.frame(el)"
#if as undirected:
R"""undirected = function(x){
if (length(x)==1) { return(x[1])}
else if (length(x)>2) {stop('Too many variables, use igraphs simplify function')}
else {return(abs(x[1]-x[2]))}
}"""#abs(x[1]-x[2]))}"
#makes graph undirected
R"g = simplify(g, edge.attr.comb='sum')"
R"g = as.undirected(g, mode = c('collapse'), edge.attr.comb = undirected)"
R"weights = E(g)$weight"
R"E(g)$weight = weights/max(weights)"
R"net=g"
#remove non carbons
smiles = readtable("./reaction_classify/carbons.csv")
smiles = Set(smiles[:species])
R"v = V(g)$name"
@rget v
v=Set(v)
# to remove carbons comment below
#smiles = Set()
smiles = intersect(smiles,novalues)
diff = [i for i in setdiff(v,smiles)]
@rput diff
R"g = delete.vertices(g,diff)"
R"net = g"
hubs = R"hub_score(net)$vector"
authorities = R"authority_score(net, weights=NA)$vector"
indegree = R"degree(net, mode='in')"
outdegree = R"degree(net, mode='out')"
closeness = R"closeness(net, mode='all')# ,weights=NA)"
ec = R"eigen_centrality(net, directed=TRUE, weights=E(net)$weight)$vector"
#betweenness = R"betweenness(net, directed=T)"
df = DataFrame()
df[Symbol("")] = [string(i) for i in names(hubs)]
df[Symbol("Concpure"*string(t))] = [specs[t,i] for i in names(hubs)]
df[Symbol("Conc"*string(t))] = [log10(specs[t,i]) for i in names(hubs)]
df[Symbol("Hubs"*string(t))] = [hubs[i] for i in names(hubs)]
df[Symbol("Authorities"*string(t))] = [authorities[i] for i in names(hubs)]
df[Symbol("InDeg"*string(t))] = [indegree[i] for i in names(hubs)]
df[Symbol("OutDeg"*string(t))] = [outdegree[i] for i in names(hubs)]
df[Symbol("Closeness"*string(t))] = [closeness[i] for i in names(hubs)]
df[Symbol("Eigenvector"*string(t))] = [ec[i] for i in names(hubs)]
writetable("output.csv", df)
t = 144+72
links = filter(i -> flux[i][t]>0 , 1:len(flux))
tflux = [log10(flux[i][t]) for i in links]
weight = tflux - minimum(tflux)
weight = 1 - weight/maximum(weight)
#1+normalise(tflux)
newflux = [flux[i][t] for i in 1:len(flux)]
for (k, i) in enumerate(links)
newflux[i] = weight[k]
end
edge = filter(i -> newflux[i[3]]>0 , edges)
source = [i[1] for i in edge]
target=[i[2] for i in edge]
weighted = [newflux[i[3]]+0.0001 for i in edge]
novalues = [i>0. for i in Array(specs[t,:])]
novalues = Set([string(i) for i in names(specs)[novalues]])
@rput source
@rput target
@rput weighted
R"library(igraph)"
R"el <- structure(list(V1 = source, V2 = target, weight = weighted), .Names = c('V1',
'V2', 'weight'), class = 'data.frame', row.names = c(1:$(length(edge))
))"
R"g <- graph.data.frame(el)"
#if as undirected:
R"""undirected = function(x){
if (length(x)==1) { return(x[1])}
else if (length(x)>2) {stop('Too many variables, use igraphs simplify function')}
else {return(abs(x[1]-x[2]))}
}"""#abs(x[1]-x[2]))}"
#makes graph undirected
R"g = simplify(g, edge.attr.comb='sum')"
R"g = as.undirected(g, mode = c('collapse'), edge.attr.comb = undirected)"
R"weights = E(g)$weight"
R"E(g)$weight = weights/max(weights)"
R"net=g"
#remove non carbons
smiles = readtable("./reaction_classify/carbons.csv")
smiles = Set(smiles[:species])
R"v = V(g)$name"
@rget v
v=Set(v)
# to remove carbons comment below
#smiles = Set()
smiles = intersect(smiles,novalues)
diff = [i for i in setdiff(v,smiles)]
@rput diff
R"g = delete.vertices(g,diff)"
R"net = g"
hubs = R"hub_score(net)$vector"
authorities = R"authority_score(net, weights=NA)$vector"
indegree = R"degree(net, mode='in')"
outdegree = R"degree(net, mode='out')"
closeness = R"closeness(net, mode='all')# ,weights=NA)"
ec = R"eigen_centrality(net, directed=TRUE, weights=E(net)$weight)$vector"
#betweenness = R"betweenness(net, directed=T)"
df[Symbol("Concpure"*string(t))] = [specs[t,i] for i in names(hubs)]
df[Symbol("Conc"*string(t))] = [log10(specs[t,i]) for i in names(hubs)]
df[Symbol("Hubs"*string(t))] = [hubs[i] for i in names(hubs)]
df[Symbol("Authorities"*string(t))] = [authorities[i] for i in names(hubs)]
df[Symbol("InDeg"*string(t))] = [indegree[i] for i in names(hubs)]
df[Symbol("OutDeg"*string(t))] = [outdegree[i] for i in names(hubs)]
df[Symbol("Closeness"*string(t))] = [closeness[i] for i in names(hubs)]
df[Symbol("Eigenvector"*string(t))] = [ec[i] for i in names(hubs)]
writetable("output.csv", df)
| |
from __future__ import print_function
import click
import datetime
import errno
from functools import wraps
import json
import logging
import filelock
import os
import sys
from onecodex.api import Api
from onecodex.lib.auth import check_version, fetch_api_key_from_uname
from onecodex.utils import collapse_user
from onecodex.version import __version__
log = logging.getLogger("onecodex")
DATE_FORMAT = "%Y-%m-%d %H:%M"
API_KEY_LEN = 32
def login_uname_pwd(server, api_key=None):
"""Prompts user for username and password, gets API key from server if not provided."""
username = click.prompt("Please enter your One Codex username (email)")
if api_key is not None:
return username, api_key
password = click.prompt("Please enter your password (typing will be hidden)", hide_input=True)
# now get the API key
api_key = fetch_api_key_from_uname(username, password, server)
return username, api_key
def _login(server, creds_file=None, api_key=None, silent=False):
"""Login main function."""
# fetch_api_key and check_version expect server to end in /
if server[-1] != "/":
server = server + "/"
# creds file path setup
if creds_file is None:
creds_file = os.path.expanduser("~/.onecodex")
# check if the creds file exists and is readable
if not os.path.exists(creds_file):
if silent:
return None
creds = {}
elif not os.access(creds_file, os.R_OK):
click.echo("Please check the permissions on {}".format(collapse_user(creds_file)), err=True)
sys.exit(1)
else:
with filelock.FileLock("{}.lock".format(creds_file)):
# it is, so let's read it!
with open(creds_file, "r") as fp:
try:
creds = json.load(fp)
except ValueError:
click.echo(
"Your ~/.onecodex credentials file appears to be corrupted. " # noqa
"Please delete it and re-authorize.",
err=True,
)
sys.exit(1)
# check for updates if logged in more than one day ago
last_update = creds.get("updated_at") or creds.get("saved_at")
last_update = (
last_update if last_update else datetime.datetime.now().strftime(DATE_FORMAT)
)
diff = datetime.datetime.now() - datetime.datetime.strptime(last_update, DATE_FORMAT)
if diff.days >= 1:
# if creds_file is old, check for updates
upgrade_required, msg = check_version(__version__, server)
creds["updated_at"] = datetime.datetime.now().strftime(DATE_FORMAT)
# use explicit OS-error types so that e.errno is guaranteed to exist
try:
with open(creds_file, "w") as outf:
json.dump(creds, outf)
except (IOError, OSError) as e:
if e.errno == errno.EACCES:
click.echo(
"Please check the permissions on {}".format(collapse_user(creds_file)),
err=True,
)
sys.exit(1)
else:
raise
if upgrade_required:
click.echo("\nWARNING: {}\n".format(msg), err=True)
# finally, give the user back what they want (whether silent or not)
if silent:
return creds.get("api_key", None)
click.echo(
"Credentials file already exists ({}). Logout first.".format(
collapse_user(creds_file)
),
err=True,
)
return creds.get("email", None)
# creds_file was not found and we're not silent, so prompt user to login
email, api_key = login_uname_pwd(server, api_key=api_key)
if api_key is None:
click.echo(
"We could not verify your credentials. Please check your email and password again. "
"If you have single sign-on (SSO) enabled, you must login with `onecodex --api-key $API_KEY login`. "
"Please contact help@onecodex.com if you continue to experience problems."
)
sys.exit(1)
creds.update(
{
"api_key": api_key,
"saved_at": datetime.datetime.now().strftime(DATE_FORMAT),
"updated_at": None,
"email": email,
}
)
with filelock.FileLock("{}.lock".format(creds_file)):
try:
with open(creds_file, "w") as outf:
json.dump(creds, outf)
except (IOError, OSError) as e:
if e.errno == errno.EACCES:
click.echo("Please check the permissions on {}".format(collapse_user(creds_file)), err=True)
sys.exit(1)
else:
raise
click.echo("Your ~/.onecodex credentials file was successfully created.", err=True)
return email
def _remove_creds(creds_file=None):
"""Remove ~/.onecodex file, returning True if successul or False if the file didn't exist."""
if creds_file is None:
creds_file = os.path.expanduser("~/.onecodex")
try:
os.remove(creds_file)
except OSError as e:
if e.errno == errno.ENOENT:
return False
elif e.errno == errno.EACCES:
click.echo(
"Please check the permissions on {}".format(collapse_user(creds_file)), err=True
)
sys.exit(1)
else:
raise
try:
os.remove("{}.lock".format(creds_file))
except (IOError, OSError):
pass
return True
def _logout(creds_file=None):
"""Logout main function, just rm ~/.onecodex more or less."""
if _remove_creds(creds_file=creds_file):
click.echo("Successfully removed One Codex credentials.", err=True)
sys.exit(0)
else:
click.echo("No One Codex API keys found.", err=True)
sys.exit(1)
def login_required(fn):
"""Require login before proceeding, but does not prompt the user to login.
Decorator should be used only on Click CLI commands.
Notes
-----
Different means of authentication will be attempted in this order:
1. An API key present in the Click context object from a previous successful authentication.
2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment.
3. An API key (ONE_CODEX_API_KEY) in the environment.
4. An API key in the credentials file (~/.onecodex).
"""
@wraps(fn)
def login_wrapper(ctx, *args, **kwargs):
base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
api_kwargs = {"telemetry": ctx.obj["TELEMETRY"], "load_extensions": False}
api_key_prior_login = ctx.obj.get("API_KEY")
bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
api_key_env = os.environ.get("ONE_CODEX_API_KEY")
api_key_creds_file = _login(base_url, silent=True)
if api_key_prior_login is not None:
api_kwargs["api_key"] = api_key_prior_login
elif bearer_token_env is not None:
api_kwargs["bearer_token"] = bearer_token_env
elif api_key_env is not None:
api_kwargs["api_key"] = api_key_env
elif api_key_creds_file is not None:
api_kwargs["api_key"] = api_key_creds_file
else:
click.echo(
"The command you specified requires authentication. Please login first.\n", err=True
)
ctx.exit(1)
ctx.obj["API"] = Api(**api_kwargs)
return fn(ctx, *args, **kwargs)
return login_wrapper
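# Usage sketch (hypothetical command; names are illustrative only): apply
# the decorator to a Click command that receives the context object, and
# read the authenticated client back from ctx.obj["API"]:
#
#   @click.command()
#   @click.pass_context
#   @login_required
#   def status(ctx):
#       click.echo("authenticated against {}".format(
#           os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")))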
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import logging
import warnings
import netaddr
from django.conf import settings
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name']:
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name') or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
def __init__(self, apiresource):
apiresource['admin_state'] = \
'UP' if apiresource['admin_state_up'] else 'DOWN'
super(Agent, self).__init__(apiresource)
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
def __init__(self, apiresource):
apiresource['admin_state'] = \
'UP' if apiresource['admin_state_up'] else 'DOWN'
# Django cannot handle a key name with ':', so use '__'
for key in apiresource.keys():
if ':' in key:
apiresource['__'.join(key.split(':'))] = apiresource[key]
super(Network, self).__init__(apiresource)
def to_dict(self):
d = dict(super(NeutronAPIDictWrapper, self).to_dict())
d['subnets'] = [s.to_dict() for s in d['subnets']]
return d
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apiresource):
apiresource['ipver_str'] = get_ipver_str(apiresource['ip_version'])
super(Subnet, self).__init__(apiresource)
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apiresource):
# Django cannot handle a key name with ':', so use '__'
for key in apiresource.keys():
if ':' in key:
apiresource['__'.join(key.split(':'))] = apiresource[key]
apiresource['admin_state'] = \
'UP' if apiresource['admin_state_up'] else 'DOWN'
if 'mac_learning_enabled' in apiresource:
apiresource['mac_state'] = \
ON_STATE if apiresource['mac_learning_enabled'] else OFF_STATE
super(Port, self).__init__(apiresource)
class Profile(NeutronAPIDictWrapper):
"""Wrapper for neutron profiles."""
_attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
'sub_type', 'multicast_ip_index', 'multicast_ip_range']
def __init__(self, apiresource):
super(Profile, self).__init__(apiresource)
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
def __init__(self, apiresource):
apiresource['admin_state'] = \
'UP' if apiresource['admin_state_up'] else 'DOWN'
super(Router, self).__init__(apiresource)
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
# If the sg name is not found in sg_dict, the first 13
# characters of the UUID are used as the sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __unicode__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
'remote': remote,
'direction': direction})
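# For example, an ingress IPv4 rule for tcp port 22 that is open to the
# world renders as "ALLOW IPv4 22/tcp from 0.0.0.0/0" under the format
# string above.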
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'neutron'
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def _list(self, **filters):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
def _sg_name_dict(self, sg_id, rules):
"""Create a mapping dict from secgroup id to its name."""
related_ids = set([sg_id])
related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
related_sgs = self.client.list_security_groups(id=related_ids,
fields=['id', 'name'])
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
'tenant_id': self.request.user.project_id}}
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
if not cidr:
cidr = None
if from_port < 0:
from_port = None
if to_port < 0:
to_port = None
if isinstance(ip_protocol, int) and ip_protocol < 0:
ip_protocol = None
body = {'security_group_rule':
{'security_group_id': parent_group_id,
'direction': direction,
'ethertype': ethertype,
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': group_id}}
rule = self.client.create_security_group_rule(body)
rule = rule.get('security_group_rule')
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
for p in ports:
params = {'security_groups': new_security_group_ids}
port_update(self.request, p.id, **params)
class FloatingIp(base.APIDictWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip['ip'] = fip['floating_ip_address']
fip['fixed_ip'] = fip['fixed_ip_address']
fip['pool'] = fip['floating_network_id']
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
pass
class FloatingIpTarget(base.APIDictWrapper):
pass
class FloatingIpManager(network_base.FloatingIpManager):
device_owner_map = {
'compute:': 'compute',
'neutron:LOADBALANCER': 'loadbalancer',
}
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
def _get_instance_type_from_device_owner(self, device_owner):
for key, value in self.device_owner_map.items():
if device_owner.startswith(key):
return value
return device_owner
def _set_instance_info(self, fip, port=None):
if fip['port_id']:
if not port:
port = port_get(self.request, fip['port_id'])
fip['instance_id'] = port.device_id
fip['instance_type'] = self._get_instance_type_from_device_owner(
port.device_owner)
else:
fip['instance_id'] = None
fip['instance_type'] = None
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
# In Neutron, list_floatingips returns Floating IPs from
# all tenants when the API is called with admin role, so
# we need to filter them with tenant_id.
search_opts['tenant_id'] = tenant_id
port_search_opts = {'tenant_id': tenant_id}
else:
port_search_opts = {}
fips = self.client.list_floatingips(**search_opts)
fips = fips.get('floatingips')
# Get port list to add instance_id to floating IP list
# instance_id is stored in device_id attribute
ports = port_list(self.request, **port_search_opts)
port_dict = SortedDict([(p['id'], p) for p in ports])
for fip in fips:
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def allocate(self, pool):
body = {'floatingip': {'floating_network_id': pool,
'tenant_id': self.request.user.project_id}}
fip = self.client.create_floatingip(body).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# NOTE: In Neutron Horizon floating IP support, port_id is
# "<port_id>_<ip_address>" format to identify multiple ports.
pid, ip_address = port_id.split('_', 1)
update_dict = {'port_id': pid,
'fixed_ip_address': ip_address}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def _get_reachable_subnets(self, ports):
# Retrieve subnet list reachable from external network
ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
gw_routers = [r.id for r in router_list(self.request)
if (r.external_gateway_info and
r.external_gateway_info.get('network_id')
in ext_net_ids)]
reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
if ((p.device_owner in
ROUTER_INTERFACE_OWNERS)
and (p.device_id in gw_routers))])
# we have to include any shared subnets as well because we may not
# have permission to see the router interface to infer connectivity
shared = set([s.id for n in network_list(self.request, shared=True)
for s in n.subnets])
return reachable_subnets | shared
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request)
server_dict = SortedDict([(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
if is_service_enabled(self.request,
config_name='enable_lb',
ext_name='lbaas'):
# Also get the loadbalancer VIPs
vip_dict = {v['port_id']: v['name']
for v in self.client.list_vips().get('vips', [])}
else:
vip_dict = {}
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
port_id = p.id
server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
target = {'name': '%s: %s' % (server_name, ip['ip_address']),
'id': '%s_%s' % (port_id, ip['ip_address']),
'instance_id': p.device_id}
targets.append(FloatingIpTarget(target))
return targets
def _target_ports_by_instance(self, instance_id):
if not instance_id:
return None
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
if target['instance_id'] == instance_id]
if not targets:
return None
return targets[0]['id']
else:
# In Neutron one port can have multiple IP addresses, so this
# method picks the first one and generates a target id.
ports = self._target_ports_by_instance(instance_id)
if not ports:
return None
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
for p in ports]
def is_simple_associate_supported(self):
# NOTE: There are two reasons that simple association support
# needs more consideration. (1) Neutron does not support the
# default floating IP pool at the moment. This can be avoided
# in cases where only one floating IP pool exists.
# (2) A Neutron floating IP is associated with each VIF, so we
# need to check whether an instance has only one VIF before
# simple association support can be enabled.
return False
def is_supported(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
def get_ipver_str(ip_version):
"""Convert an ip version number to a human-friendly string."""
return IP_VERSION_DICT.get(ip_version, '')
@memoized
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
If filter_values are too long and the total URI length exceed the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
"""
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many filter values.
# Use the excess attribute of the exception to know how many
# filter values can be inserted into a single request.
# We consider only the filter condition from (filter_attr,
# filter_values) and do not consider other filter conditions
# which may be specified in **params.
if type(filter_values) != list:
filter_values = [filter_values]
# Length of each query filter is:
# <key>=<value>& (e.g., id=<uuid>)
# The length will be key_len + value_maxlen + 2
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
chunk_size = allowed_filter_len // filter_maxlen  # integer chunk size for range()
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources
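# Example of the chunked-call pattern (mirrors the invocation in
# servers_update_addresses() below):
#
#   ports = list_resources_with_long_filters(
#       port_list, 'device_id', [instance.id for instance in servers],
#       request=request)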
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
# Get subnet list to expand subnet info in network list.
subnets = subnet_list(request)
subnet_dict = dict([(s['id'], s) for s in subnets])
# Expand subnet list from subnet_id to values.
for n in networks:
# Due to potential timing issues, we can't assume the subnet_dict data
# is in sync with the network data.
n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
s in subnet_dict]
return [Network(n) for n in networks]
def network_list_for_tenant(request, tenant_id, **params):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
Additional filter parameters given in params are passed through to network_list().
"""
LOG.debug("network_list_for_tenant(): tenant_id=%s, params=%s"
% (tenant_id, params))
# If a user has admin role, network list returned by Neutron API
# contains networks that do not belong to that tenant.
# So we need to specify tenant_id when calling network_list().
networks = network_list(request, tenant_id=tenant_id,
shared=False, **params)
# In the current Neutron API, there is no way to retrieve
# both owner networks and public networks in a single API call.
networks += network_list(request, shared=True, **params)
return networks
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%s, params=%s" % (network_id, params))
network = neutronclient(request).show_network(network_id,
**params).get('network')
# Since the number of subnets per network must be small,
# call subnet_get() for each subnet instead of calling
# subnet_list() once.
if expand_subnet:
network['subnets'] = [subnet_get(request, sid)
for sid in network['subnets']]
return Network(network)
def network_create(request, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param tenant_id: (optional) tenant id of the network created
:param name: (optional) name of the network created
:returns: Subnet object
"""
LOG.debug("network_create(): kwargs = %s" % kwargs)
# In the case network profiles are being used, profile id is needed.
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body = {'network': kwargs}
network = neutronclient(request).create_network(body=body).get('network')
return Network(network)
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
body = {'network': kwargs}
network = neutronclient(request).update_network(network_id,
body=body).get('network')
return Network(network)
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s" % network_id)
neutronclient(request).delete_network(network_id)
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s" % (params))
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
subnet = neutronclient(request).show_subnet(subnet_id,
**params).get('subnet')
return Subnet(subnet)
def subnet_create(request, network_id, cidr, ip_version, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: subnet IP address range
:param ip_version: IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:returns: Subnet object
"""
LOG.debug("subnet_create(): netid=%s, cidr=%s, ipver=%d, kwargs=%s"
% (network_id, cidr, ip_version, kwargs))
body = {'subnet':
{'network_id': network_id,
'ip_version': ip_version,
'cidr': cidr}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet)
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
body = {'subnet': kwargs}
subnet = neutronclient(request).update_subnet(subnet_id,
body=body).get('subnet')
return Subnet(subnet)
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
neutronclient(request).delete_subnet(subnet_id)
def port_list(request, **params):
LOG.debug("port_list(): params=%s" % (params))
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
port = neutronclient(request).show_port(port_id, **params).get('port')
return Port(port)
def unescape_port_kwargs(**kwargs):
for key in kwargs:
if '__' in key:
kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
return kwargs
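# Reverses the ':' -> '__' escaping applied in the Network/Port wrappers
# above, e.g. a kwarg passed as binding__vnic_type=... is sent to Neutron
# as 'binding:vnic_type' (attribute name used here for illustration).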
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
:param request: request context
:param network_id: network id the port is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
"""
LOG.debug("port_create(): netid=%s, kwargs=%s" % (network_id, kwargs))
# In the case policy profiles are being used, profile id is needed.
if 'policy_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id')
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port)
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s" % port_id)
neutronclient(request).delete_port(port_id)
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': kwargs}
port = neutronclient(request).update_port(port_id, body=body).get('port')
return Port(port)
def profile_list(request, type_p, **params):
LOG.debug("profile_list(): "
"profile_type=%(profile_type)s, params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
profiles = neutronclient(request).list_network_profiles(
**params).get('network_profiles')
elif type_p == 'policy':
profiles = neutronclient(request).list_policy_profiles(
**params).get('policy_profiles')
return [Profile(n) for n in profiles]
def profile_get(request, profile_id, **params):
LOG.debug("profile_get(): "
"profileid=%(profileid)s, params=%(params)s",
{'profileid': profile_id, 'params': params})
profile = neutronclient(request).show_network_profile(
profile_id, **params).get('network_profile')
return Profile(profile)
def profile_create(request, **kwargs):
LOG.debug("profile_create(): kwargs=%s", kwargs)
body = {'network_profile': {}}
body['network_profile'].update(kwargs)
profile = neutronclient(request).create_network_profile(
body=body).get('network_profile')
return Profile(profile)
def profile_delete(request, profile_id):
LOG.debug("profile_delete(): profile_id=%s", profile_id)
neutronclient(request).delete_network_profile(profile_id)
def profile_update(request, profile_id, **kwargs):
LOG.debug("profile_update(): "
"profileid=%(profileid)s, kwargs=%(kwargs)s",
{'profileid': profile_id, 'kwargs': kwargs})
body = {'network_profile': kwargs}
profile = neutronclient(request).update_network_profile(
profile_id, body=body).get('network_profile')
return Profile(profile)
def profile_bindings_list(request, type_p, **params):
LOG.debug("profile_bindings_list(): "
"profile_type=%(profile_type)s params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
bindings = neutronclient(request).list_network_profile_bindings(
**params).get('network_profile_bindings')
elif type_p == 'policy':
bindings = neutronclient(request).list_policy_profile_bindings(
**params).get('policy_profile_bindings')
return [Profile(n) for n in bindings]
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s" % kwargs)
body = {'router': {}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['router'].update(kwargs)
router = neutronclient(request).create_router(body=body).get('router')
return Router(router)
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
body = {'router': {}}
body['router'].update(kwargs)
router = neutronclient(request).update_router(r_id, body=body)
return Router(router['router'])
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
client = neutronclient(request)
return client.add_interface_router(router_id, body)
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
neutronclient(request).remove_interface_router(router_id, body)
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
Should be used when up to date networking information is required,
and Nova's networking info caching mechanism is not fast enough.
"""
# Get all (filtered for relevant servers) information from Neutron
try:
ports = list_resources_with_long_filters(
port_list, 'device_id', [instance.id for instance in servers],
request=request)
fips = FloatingIpManager(request)
if fips.is_supported():
floating_ips = list_resources_with_long_filters(
fips.list, 'port_id', [port.id for port in ports],
all_tenants=all_tenants)
else:
floating_ips = []
networks = list_resources_with_long_filters(
network_list, 'id', set([port.network_id for port in ports]),
request=request)
except Exception:
error_message = _('Unable to connect to Neutron.')
LOG.error(error_message)
messages.error(request, error_message)
return
# Map instance to its ports
instances_ports = collections.defaultdict(list)
for port in ports:
instances_ports[port.device_id].append(port)
# Map port to its floating ips
ports_floating_ips = collections.defaultdict(list)
for fip in floating_ips:
ports_floating_ips[fip.port_id].append(fip)
# Map network id to its name
network_names = dict(((network.id, network.name) for network in networks))
for server in servers:
try:
addresses = _server_get_addresses(
request,
server,
instances_ports,
ports_floating_ips,
network_names)
except Exception as e:
LOG.error(e)
else:
server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
def _format_address(mac, ip, type):
try:
version = netaddr.IPAddress(ip).version
except Exception as e:
error_message = _('Unable to parse IP address %s.') % ip
LOG.error(error_message)
messages.error(request, error_message)
raise e
return {u'OS-EXT-IPS-MAC:mac_addr': mac,
u'version': version,
u'addr': ip,
u'OS-EXT-IPS:type': type}
addresses = collections.defaultdict(list)
instance_ports = ports.get(server.id, [])
for port in instance_ports:
network_name = network_names.get(port.network_id)
if network_name is not None:
for fixed_ip in port.fixed_ips:
addresses[network_name].append(
_format_address(port.mac_address,
fixed_ip['ip_address'],
u'fixed'))
port_fips = floating_ips.get(port.id, [])
for fip in port_fips:
addresses[network_name].append(
_format_address(port.mac_address,
fip.floating_ip_address,
u'floating'))
return dict(addresses)
@memoized
def list_extensions(request):
extensions_list = neutronclient(request).list_extensions()
if 'extensions' in extensions_list:
return extensions_list['extensions']
else:
return {}
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)
for extension in extensions:
if extension['alias'] == extension_alias:
return True
return False
def is_enabled_by_config(name, default=True):
if hasattr(settings, 'OPENSTACK_QUANTUM_NETWORK'):
warnings.warn(
'OPENSTACK_QUANTUM_NETWORK setting is deprecated and will be '
'removed in the near future. '
'Please use OPENSTACK_NEUTRON_NETWORK instead.',
DeprecationWarning)
network_config = (getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}) or
getattr(settings, 'OPENSTACK_QUANTUM_NETWORK', {}))
return network_config.get(name, default)
@memoized
def is_service_enabled(request, config_name, ext_name):
return (is_enabled_by_config(config_name) and
is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
if (is_enabled_by_config('enable_quotas', False) and
is_extension_supported(request, 'quotas')):
return True
else:
return False
# Using this mechanism till a better plugin/sub-plugin detection
# mechanism is available.
# When using specific plugins the profile_support can be
# turned on if needed to configure and/or use profiles.
# Since this is a temporary mechanism used to detect profile_support
# @memorize is not being used.
# TODO(absubram): Change this config variable check with
# subplugin/plugin detection API when it becomes available.
def is_port_profiles_supported():
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
# Can be used to check for vendor specific plugin
profile_support = network_config.get('profile_support', None)
if str(profile_support).lower() == 'cisco':
return True
return False
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "poclies")
# If a key is not contained, the corresponding permission check is skipped.
FEATURE_MAP = {
'dvr': {
'extension': 'dvr',
'config': {
'name': 'enable_distributed_router',
'default': False,
},
'policies': {
'get': 'get_router:distributed',
'create': 'create_router:distributed',
'update': 'update_router:distributed',
}
},
'l3-ha': {
'extension': 'l3-ha',
'config': {'name': 'enable_ha_router',
'default': False},
'policies': {
'get': 'get_router:ha',
'create': 'create_router:ha',
'update': 'update_router:ha',
}
},
}
def get_feature_permission(request, feature, operation=None):
"""Check if a feature-specific field can be displayed.
This method checks the permission for a feature-specific field.
Such a field is usually provided through a Neutron extension.
:param request: Request Object
:param feature: feature name defined in FEATURE_MAP
:param operation (optional): Operation type. The valid value should be
defined in FEATURE_MAP[feature]['policies']
It must be specified if FEATURE_MAP[feature] has 'policies'.
"""
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
feature_info = FEATURE_MAP.get(feature)
if not feature_info:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The requested feature '%(feature)s' is unknown. "
"Please make sure to specify a feature defined "
"in FEATURE_MAP."))
# Check dashboard settings
feature_config = feature_info.get('config')
if feature_config:
if not network_config.get(feature_config['name'],
feature_config['default']):
return False
# Check policy
feature_policies = feature_info.get('policies')
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if feature_policies and policy_check:
policy_name = feature_policies.get(operation)
if not policy_name:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The 'operation' parameter for "
"get_feature_permission '%(feature)s' "
"is invalid. It should be one of %(allowed)s")
% {'feature': feature,
'allowed': ' '.join(feature_policies.keys())})
role = (('network', policy_name),)
if not policy.check(role, request):
return False
# Check if a required extension is enabled
feature_extension = feature_info.get('extension')
if feature_extension:
try:
return is_extension_supported(request, feature_extension)
except Exception:
msg = (_("Failed to check Neutron '%s' extension is not supported")
% feature_extension)
LOG.info(msg)
return False
# If all checks are passed, now a given feature is allowed.
return True
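# Usage sketch: FEATURE_MAP defines 'dvr' with get/create/update policies,
# so a form that exposes the distributed-router field could gate it with,
# for example:
#
#   if get_feature_permission(request, 'dvr', 'create'):
#       # render the "distributed" option
#       ...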
| |
from __future__ import print_function
import sys
import time
import traceback
import warnings
from functools import wraps
from six import iteritems
from . import formats
from . import levels
from . import outputs
from .lib import iso8601time
from .message import Message
def emit(level):
"""a decorator that emits at `level <.LogLevel>` after calling the method. The method
should return a `.Logger` instance.
For convenience, decorators for the various levels are available as
``emit.debug``, ``emit.info``, etc..
"""
def decorator(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
f(self, *args, **kwargs)._emit(level, '', [], {})
return wrapper
return decorator
emit.debug = emit(levels.DEBUG)
emit.info = emit(levels.INFO)
emit.notice = emit(levels.NOTICE)
emit.warning = emit(levels.WARNING)
emit.error = emit(levels.ERROR)
emit.critical = emit(levels.CRITICAL)
class BaseLogger(object):
"""Base class for loggers"""
__slots__ = ['_fields', '_options', 'min_level']
__valid_options = set(Message._default_options)
def __init__(self, fields=None, options=None, min_level=None):
"""Constructor for internal module use only, basically.
``fields`` and ``options`` will be copied.
"""
self._fields = fields.copy() if fields is not None else {}
self._options = options.copy() if options is not None else Message._default_options.copy()
self.min_level = min_level if min_level is not None else levels.DEBUG
def _clone(self):
return self.__class__(fields=self._fields, options=self._options, min_level=self.min_level)
def _emit(self, level, format_spec, args, kwargs):
raise NotImplementedError
#
# The Magic
#
def fields(self, **kwargs):
"""bind fields for structured logging"""
return self.fields_dict(kwargs)
def fields_dict(self, d):
"""bind fields for structured logging.
Use this instead of `.fields` if you have keys which are not valid Python identifiers.
"""
clone = self._clone()
clone._fields.update(d)
return clone
def options(self, **kwargs):
"""bind option for message creation."""
bad_options = set(kwargs) - self.__valid_options
if bad_options:
raise ValueError("Invalid options {0!r}".format(tuple(bad_options)))
clone = self._clone()
clone._options.update(kwargs)
return clone
#
# Convenience
#
def trace(self, trace='error'):
"""convenience method to enable traceback logging"""
return self.options(trace=trace)
def name(self, name):
"""convenvience method to bind ``name`` field"""
return self.fields(name=name)
#
# Do something
#
def debug(self, format_spec='', *args, **kwargs):
"""Emit at ``DEBUG`` level"""
self._emit(levels.DEBUG, format_spec, args, kwargs)
def info(self, format_spec='', *args, **kwargs):
"""Emit at ``INFO`` level"""
self._emit(levels.INFO, format_spec, args, kwargs)
def notice(self, format_spec='', *args, **kwargs):
"""Emit at ``NOTICE`` level"""
self._emit(levels.NOTICE, format_spec, args, kwargs)
def warning(self, format_spec='', *args, **kwargs):
"""Emit at ``WARNING`` level"""
self._emit(levels.WARNING, format_spec, args, kwargs)
def error(self, format_spec='', *args, **kwargs):
"""Emit at ``ERROR`` level"""
self._emit(levels.ERROR, format_spec, args, kwargs)
def critical(self, format_spec='', *args, **kwargs):
"""Emit at ``CRITICAL`` level"""
self._emit(levels.CRITICAL, format_spec, args, kwargs)
class InternalLogger(BaseLogger):
"""
Special-purpose logger for internal uses
Sends messages directly to output, bypassing :data:`.emitters`.
:ivar `Output` output: an output to write to
"""
__slots__ = ['output']
def __init__(self, output, fields=None, options=None, min_level=None):
super(InternalLogger, self).__init__(fields, options, min_level)
self.output = output
def _clone(self):
return self.__class__(fields=self._fields, options=self._options,
min_level=self.min_level, output=self.output)
def _emit(self, level, format_spec, args, kwargs):
"""does work of emitting - for internal use"""
if level < self.min_level:
return
try:
try:
msg = Message(level, format_spec, self._fields.copy(), self._options.copy(),
args, kwargs)
except Exception:
msg = None
raise
else:
self.output.output(msg)
except Exception:
print(iso8601time(), "Error in twiggy internal log! Something is seriously broken.",
file=sys.stderr)
print("Offending message:", repr(msg), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
class Logger(BaseLogger):
"""Logger for end-users"""
__slots__ = ['_emitters', 'filter']
def _feature_noop(self, *args, **kwargs):
return self._clone()
@classmethod
def addFeature(cls, func, name=None):
"""add a feature to the class
:arg func: the function to add
:arg string name: the name to add it under. If None, use the function's name.
"""
warnings.warn("Use of features is currently discouraged, pending refactoring",
RuntimeWarning)
name = name if name is not None else func.__name__
setattr(cls, name, func)
@classmethod
def disableFeature(cls, name):
"""disable a feature.
A method will still exist by this name, but it won't do anything.
:arg string name: the name of the feature to disable.
"""
warnings.warn("Use of features is currently discouraged, pending refactoring",
RuntimeWarning)
# get func directly from class dict - we don't want an unbound method.
setattr(cls, name, cls.__dict__['_feature_noop'])
@classmethod
def delFeature(cls, name):
"""delete a feature entirely
:arg string name: the name of the feature to remove
"""
warnings.warn("Use of features is currently discouraged, pending refactoring",
RuntimeWarning)
delattr(cls, name)
def __init__(self, fields=None, options=None, emitters=None,
min_level=None, filter=None):
super(Logger, self).__init__(fields, options, min_level)
#: a dict of emitters
self._emitters = emitters if emitters is not None else {}
self.filter = filter if filter is not None else lambda format_spec: True
def _clone(self):
"""return a new Logger instance with copied attributes
Probably only for internal use.
"""
return self.__class__(fields=self._fields, options=self._options,
emitters=self._emitters, min_level=self.min_level,
filter=self.filter)
@emit.info
def struct(self, **kwargs):
"""convenience method for structured logging.
Calls fields() and emits at INFO
"""
return self.fields(**kwargs)
@emit.info
def struct_dict(self, d):
"""convenience method for structured logging.
Use instead of struct() if you have keys which are not valid Python identifiers
"""
return self.fields_dict(d)
#
# Boring stuff
#
def _emit(self, level, format_spec, args, kwargs):
"""does the work of emitting - for internal use"""
# XXX should these traps be collapsed?
if level < self.min_level:
return
try:
if not self.filter(format_spec):
return
except Exception:
internal_log.info("Error in Logger filtering with {0} on {1}",
repr(self.filter), format_spec)
# just continue emitting in face of filter error
# XXX should we trap here too b/c of "Dictionary changed size during iteration" (or
# other rare errors?)
potential_emitters = [(name, emitter) for name, emitter in iteritems(self._emitters)
if level >= emitter.min_level]
if not potential_emitters:
return
try:
msg = Message(level, format_spec, self._fields.copy(), self._options.copy(),
args, kwargs)
except Exception:
# XXX use .fields() instead?
internal_log.info("Error formatting message level: {0!r}, format: {1!r},"
" fields: {2!r}, options: {3!r}, args: {4!r}, kwargs: {5!r}",
level, format_spec, self._fields, self._options, args, kwargs)
return
outputs = set()
# sort to make things deterministic (for tests, mainly)
for name, emitter in sorted(potential_emitters):
try:
include = emitter.filter(msg)
except Exception:
internal_log.info("Error filtering with emitter {0}. Filter: {1}"
" Message: {2!r}", name, repr(emitter.filter), msg)
include = True # output anyway if error
if include:
outputs.add(emitter._output)
for o in outputs:
try:
o.output(msg)
except Exception:
internal_log.warning("Error outputting with {0!r}. Message: {1!r}", o, msg)
__fields = {'time': time.gmtime}
__internal_format = formats.LineFormat(conversion=formats.line_conversion)
__internal_output = outputs.StreamOutput(format=__internal_format, stream=sys.stderr)
internal_log = InternalLogger(fields=__fields, output=__internal_output
).name('twiggy.internal').trace('error')
| |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import logging
import tensorflow as tf
import tensorflow_model_analysis as tfma
import metadata
# ******************************************************************************
# YOU NEED NOT CHANGE THESE HELPER FUNCTIONS
# *****************************************************************************
def get_feature_spec(is_serving=False):
"""Create feature_spec from metadata. Used for parsing tf examples.
Args:
is_serving: boolean - whether to create feature_spec for training or serving.
Returns:
feature_spec
"""
column_names = metadata.SERVING_COLUMN_NAMES \
if is_serving else metadata.COLUMN_NAMES
feature_spec = {}
for feature_name in column_names:
if feature_name in metadata.NUMERIC_FEATURE_NAMES_WITH_STATS:
feature_spec[feature_name] = tf.FixedLenFeature(shape=1, dtype=tf.float32)
elif feature_name in metadata.CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY:
# tf.parse_example supports only float32, int64 and string dtypes.
feature_spec[feature_name] = tf.FixedLenFeature(shape=1, dtype=tf.int64)
elif feature_name in metadata.CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY:
feature_spec[feature_name] = tf.FixedLenFeature(shape=1, dtype=tf.string)
elif feature_name in metadata.CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET:
feature_spec[feature_name] = tf.FixedLenFeature(shape=1, dtype=tf.string)
elif feature_name == metadata.TARGET_NAME:
if metadata.TASK_TYPE == 'classification':
feature_spec[feature_name] = tf.FixedLenFeature(shape=1,
dtype=tf.string)
else:
feature_spec[feature_name] = tf.FixedLenFeature(shape=1,
dtype=tf.float32)
return feature_spec
def parse_csv(csv_row, is_serving=False):
"""Takes the string input tensor (csv) and returns a dict of rank-2 tensors.
Takes a rank-1 tensor and converts it into rank-2 tensor, with respect to
its data type (inferred from the metadata).
Args:
csv_row: rank-1 tensor of type string (csv).
is_serving: boolean to indicate whether this function is called during
serving or training, since the csv_row serving input is different than
the training input (i.e., no target column).
Returns:
a dict of rank-2 tensors with the correct data types.
"""
if is_serving:
column_names = metadata.SERVING_COLUMN_NAMES
defaults = []
# create the defaults for the serving columns.
for serving_feature in metadata.SERVING_COLUMN_NAMES:
feature_index = metadata.COLUMN_NAMES.index(serving_feature)
defaults.append(metadata.DEFAULTS[feature_index])
else:
column_names = metadata.COLUMN_NAMES
defaults = metadata.DEFAULTS
columns = tf.decode_csv(csv_row, record_defaults=defaults)
features = dict(zip(column_names, columns))
return features
# ******************************************************************************
# YOU MAY IMPLEMENT THIS FUNCTION FOR CUSTOM FEATURE ENGINEERING
# ******************************************************************************
def process_features(features):
""" Use to implement custom feature engineering logic.
Default behaviour is to return the original feature tensors dictionary as-is.
Args:
features: {string:tensors} - dictionary of feature tensors
Returns:
{string:tensors}: extended feature tensors dictionary
"""
# examples - given:
# 'x' and 'y' are two numeric features:
# 'alpha' and 'beta' are two categorical features
# # create new features using custom logic
# features['x_2'] = tf.pow(features['x'],2)
# features['y_2'] = tf.pow(features['y'], 2)
# features['xy'] = features['x'] * features['y']
# features['sin_x'] = tf.sin(features['x'])
# features['cos_y'] = tf.cos(features['x'])
# features['log_xy'] = tf.log(features['xy'])
# features['sqrt_xy'] = tf.sqrt(features['xy'])
# # add created features to metadata (if not already defined in metadata.py)
# NUMERIC_FEATURE_NAMES_WITH_STATS['x_2']: None
# NUMERIC_FEATURE_NAMES_WITH_STATS['y_2']: None
# ....
return features
# ******************************************************************************
# YOU NEED NOT CHANGE THIS FUNCTION TO READ DATA FILES
# ******************************************************************************
def make_input_fn(file_pattern,
file_encoding='csv',
mode=tf.estimator.ModeKeys.EVAL,
has_header=False,
batch_size=128,
multi_threading=True):
"""Makes an input function for reading training and evaluation data file(s).
Args:
file_pattern: str - file name or file name patterns from which to read the data.
mode: tf.estimator.ModeKeys - either TRAIN or EVAL.
Used to determine whether to shuffle the data and how many epochs
to read it: indefinitely for TRAIN, once for EVAL.
file_encoding: type of the text files. Can be 'csv' or 'tfrecords'
has_header: boolean - set to True to skip header lines in CSV files.
batch_size: int - first dimension size of the Tensors returned by input_fn
multi_threading: boolean - indicator to use multi-threading or not
Returns:
A function () -> tf.data.Dataset of (features, target) tuples, where
features is a dictionary of Tensors and target is the label Tensor.
"""
shuffle = (mode == tf.estimator.ModeKeys.TRAIN)
num_epochs = None if mode == tf.estimator.ModeKeys.TRAIN else 1
num_threads = multiprocessing.cpu_count() if multi_threading else 1
buffer_size = 2 * batch_size + 1
logging.info("Making input_fn...")
logging.info("Mode: {}.".format(mode))
logging.info("Input file(s): {}.".format(file_pattern))
logging.info("Files encoding: {}.".format(file_encoding))
logging.info("Batch size: {}.".format(batch_size))
logging.info("Epoch count: {}.".format(num_epochs))
logging.info("Thread count: {}.".format(num_threads))
logging.info("Shuffle: {}.".format(shuffle))
def _input_fn():
if file_encoding == 'csv':
dataset = tf.data.experimental.make_csv_dataset(
file_pattern,
batch_size,
column_names=metadata.COLUMN_NAMES,
column_defaults=metadata.DEFAULTS,
label_name=metadata.TARGET_NAME,
field_delim=',',
header=has_header,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_buffer_size=buffer_size,
num_parallel_reads=num_threads,
sloppy=True,
)
else:
dataset = tf.data.experimental.make_batched_features_dataset(
file_pattern,
batch_size,
features=get_feature_spec(),
reader=tf.data.TFRecordDataset,
label_key=metadata.TARGET_NAME,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_buffer_size=buffer_size,
reader_num_threads=num_threads,
parser_num_threads=num_threads,
sloppy_ordering=True,
drop_final_batch=False
)
dataset = dataset.map(
lambda features, target: (process_features(features), target))
return dataset
return _input_fn
# ******************************************************************************
# SERVING INPUT FUNCTIONS - YOU NEED NOT CHANGE THE FOLLOWING PART
# ******************************************************************************
def json_serving_input_receiver_fn():
"""Creating an ServingInputReceiver object for JSON data.
Returns:
ServingInputReceiver
"""
# Note that the inputs are raw features, not transformed features.
receiver_tensors = {}
for column_name in metadata.SERVING_COLUMN_NAMES:
if column_name in metadata.CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY:
receiver_tensors[column_name] = tf.placeholder(
shape=[None], dtype=tf.int32)
elif column_name in metadata.NUMERIC_FEATURE_NAMES_WITH_STATS:
receiver_tensors[column_name] = tf.placeholder(
shape=[None], dtype=tf.float32)
else:
receiver_tensors[column_name] = tf.placeholder(
shape=[None], dtype=tf.string)
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in receiver_tensors.items()
}
return tf.estimator.export.ServingInputReceiver(
features=process_features(features),
receiver_tensors=receiver_tensors
)
def csv_serving_input_receiver_fn():
"""Creating an ServingInputReceiver object for CSV data.
Returns:
ServingInputReceiver
"""
# Note that the inputs are raw features, not transformed features.
csv_row = tf.placeholder(shape=[None], dtype=tf.string)
features = parse_csv(csv_row, is_serving=True)
return tf.estimator.export.ServingInputReceiver(
features=process_features(features),
receiver_tensors={'csv_row': csv_row}
)
def example_serving_input_receiver_fn():
"""Creating an ServingInputReceiver object for TFRecords data.
Returns:
ServingInputReceiver
"""
# Note that the inputs are raw features, not transformed features.
receiver_tensors = tf.placeholder(shape=[None], dtype=tf.string)
features = tf.parse_example(
receiver_tensors,
features=get_feature_spec(is_serving=True)
)
for key in features:
features[key] = tf.expand_dims(features[key], -1)
return tf.estimator.export.ServingInputReceiver(
features=process_features(features),
receiver_tensors={'example_proto': receiver_tensors}
)
SERVING_INPUT_RECEIVER_FUNCTIONS = {
'JSON': json_serving_input_receiver_fn,
'EXAMPLE': example_serving_input_receiver_fn,
'CSV': csv_serving_input_receiver_fn
}
# ******************************************************************************
# EVALUATING INPUT FUNCTIONS - YOU NEED NOT CHANGE THE FOLLOWING PART
# ******************************************************************************
def csv_evaluating_input_receiver_fn():
"""Creating an EvalInputReceiver object for CSV data.
Returns:
EvalInputReceiver
"""
# Notice that the inputs are raw features, not transformed features.
csv_row = tf.placeholder(shape=[None], dtype=tf.string)
features = parse_csv(csv_row, is_serving=False)
target = features.pop(metadata.TARGET_NAME)
return tfma.export.EvalInputReceiver(
features=process_features(features),
receiver_tensors={'examples': csv_row},
labels=target)
def example_evaluating_input_receiver_fn():
"""Creating an EvalInputReceiver object for TFRecords data.
Returns:
EvalInputReceiver
"""
tf_example = tf.placeholder(shape=[None], dtype=tf.string)
features = tf.parse_example(
tf_example,
features=get_feature_spec(is_serving=False))
for key in features:
features[key] = tf.expand_dims(features[key], -1)
return tfma.export.EvalInputReceiver(
features=process_features(features),
receiver_tensors={'examples': tf_example},
labels=features[metadata.TARGET_NAME])
EVALUATING_INPUT_RECEIVER_FUNCTIONS = {
'EXAMPLE': example_evaluating_input_receiver_fn,
'CSV': csv_evaluating_input_receiver_fn
}
| |
#!/usr/bin/env python
# ---- MODULE DOCSTRING
__doc__ = """
(C) Hive, Romain Wuilbercq, 2017
_
/_/_ .'''.
=O(_)))) ...' `.
\_\ `. .'''X
`..'
.---. .---..-./`) ,---. ,---. .-''-.
| | |_ _|\ .-.')| / | | .'_ _ \
| | ( ' )/ `-' \| | | .'/ ( ` ) '
| '-(_{;}_)`-'`"`| | _ | |. (_ o _) |
| (_,_) .---. | _( )_ || (_,_)___|
| _ _--. | | | \ (_ o._) /' \ .---.
|( ' ) | | | | \ (_,_) / \ `-' /
(_{;}_)| | | | \ / \ /
'(_,_) '---' '---' `---` `'-..-'
The Artificial Bee Colony (ABC) algorithm is based on the
intelligent foraging behaviour of a honey bee swarm, and was first proposed
by Karaboga in 2005.
Author:
------
Romain Wuilbercq
"""
# ---- IMPORT MODULES
import random
import sys
import copy
# ---- BEE CLASS
class Bee(object):
""" Creates a bee object. """
def __init__(self, lower, upper, fun, funcon=None):
"""
Instantiates a bee object randomly.
Parameters:
----------
:param list lower : lower bound of solution vector
:param list upper : upper bound of solution vector
:param def fun : evaluation function
:param def funcon : constraints function, must return a boolean
"""
# creates a random solution vector
self._random(lower, upper)
# checks if the problem constraint(s) are satisfied
if not funcon:
self.valid = True
else:
self.valid = funcon(self.vector)
# computes fitness of solution vector
if (fun is not None):
self.value = fun(self.vector)
else:
self.value = sys.float_info.max
self._fitness()
# initialises trial limit counter - i.e. abandonment counter
self.counter = 0
def _random(self, lower, upper):
""" Initialises a solution vector randomly. """
self.vector = []
for i in range(len(lower)):
self.vector.append( lower[i] + random.random() * (upper[i] - lower[i]) )
def _fitness(self):
"""
Evaluates the fitness of a solution vector.
The fitness is a measure of the quality of a solution.
"""
if (self.value >= 0):
self.fitness = 1 / (1 + self.value)
else:
self.fitness = 1 + abs(self.value)
class BeeHive(object):
"""
Creates an Artificial Bee Colony (ABC) algorithm.
The population of the hive is composed of three distinct types
of individuals:
1. "employees",
2. "onlookers",
3. "scouts".
The employed bees and onlooker bees exploit the nectar
sources around the hive - i.e. exploitation phase - while the
scouts explore the solution domain - i.e. exploration phase.
The number of nectar sources around the hive is equal to
the number of actively employed bees and the number of employees
is equal to the number of onlooker bees.
"""
def run(self):
""" Runs an Artificial Bee Colony (ABC) algorithm. """
cost = {}; cost["best"] = []; cost["mean"] = []
for itr in range(self.max_itrs):
# employees phase
for index in range(self.size):
self.send_employee(index)
# onlookers phase
self.send_onlookers()
# scouts phase
self.send_scout()
# updates current best solution
self.find_best()
# stores convergence information
cost["best"].append( self.best )
cost["mean"].append( sum( [ bee.value for bee in self.population ] ) / self.size )
# prints out information about computation
if self.verbose:
self._verbose(itr, cost)
return cost
def __init__(self ,
lower, upper ,
fun = None ,
numb_bees = 30 ,
max_itrs = 100 ,
max_trials = None ,
selfun = None ,
seed = None ,
verbose = False ,
extra_params = None ,):
"""
Instantiates a bee hive object.
1. INITIALISATION PHASE.
-----------------------
The initial population of bees should cover the entire search space as
much as possible by randomizing individuals within the search
space constrained by the prescribed lower and upper bounds.
Parameters:
----------
:param list lower : lower bound of solution vector
:param list upper : upper bound of solution vector
:param def fun : evaluation function of the optimisation problem
:param int numb_bees : number of active bees within the hive
:param int max_trials : max number of trials without any improvement
:param def selfun : custom selection function
:param int seed : seed of random number generator
:param boolean verbose : makes computation verbose
:param dict extra_params : optional extra arguments for selection function selfun
"""
# checks input
assert (len(upper) == len(lower)), "'lower' and 'upper' must be a list of the same length."
# generates a seed for the random number generator
if (seed is None):
self.seed = random.randint(0, 1000)
else:
self.seed = seed
random.seed(self.seed)
# computes the number of employees
self.size = int((numb_bees + numb_bees % 2))
# assigns properties of algorithm
self.dim = len(lower)
self.max_itrs = max_itrs
if (max_trials is None):
self.max_trials = 0.6 * self.size * self.dim
else:
self.max_trials = max_trials
self.selfun = selfun
self.extra_params = extra_params
# assigns properties of the optimisation problem
self.evaluate = fun
self.lower = lower
self.upper = upper
# initialises current best and its solution vector
self.best = sys.float_info.max
self.solution = None
# creates a bee hive
self.population = [ Bee(lower, upper, fun) for i in range(self.size) ]
# finds the initial best solution vector
self.find_best()
# computes selection probability
self.compute_probability()
# verbosity of computation
self.verbose = verbose
def find_best(self):
""" Finds current best bee candidate. """
values = [ bee.value for bee in self.population ]
index = values.index(min(values))
if (values[index] < self.best):
self.best = values[index]
self.solution = self.population[index].vector
def compute_probability(self):
"""
Computes the relative chance that a given solution vector is
chosen by an onlooker bee after the Waggle dance ceremony when
employed bees are back within the hive.
"""
# retrieves fitness of bees within the hive
values = [bee.fitness for bee in self.population]
max_values = max(values)
# computes probabilities the way Karaboga does in his classic ABC implementation
if (self.selfun is None):
self.probas = [0.9 * v / max_values + 0.1 for v in values]
else:
if (self.extra_params is not None):
self.probas = self.selfun(list(values), **self.extra_params)
else:
self.probas = self.selfun(values)
# returns intervals of probabilities
return [sum(self.probas[:i+1]) for i in range(self.size)]
def send_employee(self, index):
"""
2. SEND EMPLOYED BEES PHASE.
---------------------------
During this 2nd phase, new candidate solutions are produced for
each employed bee by cross-over and mutation of the employees.
If the modified vector of the mutant bee solution is better than
that of the original bee, the new vector is assigned to the bee.
"""
# deepcopies current bee solution vector
zombee = copy.deepcopy(self.population[index])
# draws a dimension to be crossed-over and mutated
d = random.randint(0, self.dim-1)
# selects another bee
bee_ix = index
while (bee_ix == index): bee_ix = random.randint(0, self.size-1)
# produces a mutant based on current bee and bee's friend
zombee.vector[d] = self._mutate(d, index, bee_ix)
# checks boundaries
zombee.vector = self._check(zombee.vector, dim=d)
# computes fitness of mutant
zombee.value = self.evaluate(zombee.vector)
zombee._fitness()
# deterministic crowding
if (zombee.fitness > self.population[index].fitness):
self.population[index] = copy.deepcopy(zombee)
self.population[index].counter = 0
else:
self.population[index].counter += 1
def send_onlookers(self):
"""
3. SEND ONLOOKERS PHASE.
-----------------------
We define as many onlooker bees as there are employed bees in
the hive since onlooker bees will attempt to locally improve the
solution path of the employed bee they have decided to follow
after the waggle dance phase.
If they improve it, they will communicate their findings to the bee
they initially watched "waggle dancing".
"""
# sends onlookers
numb_onlookers = 0; beta = 0
while (numb_onlookers < self.size):
# draws a random number from U[0,1]
phi = random.random()
# increments roulette wheel parameter beta
beta += phi * max(self.probas)
beta %= max(self.probas)
# selects a new onlooker based on waggle dance
index = self.select(beta)
# sends new onlooker
self.send_employee(index)
# increments number of onlookers
numb_onlookers += 1
def select(self, beta):
"""
4. WAGGLE DANCE PHASE.
---------------------
During this 4th phase, onlooker bees are recruited using a roulette
wheel selection.
This phase represents the "waggle dance" of honey bees (i.e. figure-
eight dance). By performing this dance, successful foragers
(i.e. "employed" bees) can share, with other members of the
colony, information about the direction and distance to patches of
flowers yielding nectar and pollen, to water sources, or to new
nest-site locations.
During the recruitment, the bee colony is re-sampled in order to mostly
keep, within the hive, the solution vector of employed bees that have a
good fitness as well as a small number of bees with lower fitnesses to
enforce diversity.
Parameter(s):
------------
:param float beta : "roulette wheel selection" parameter - i.e. 0 <= beta <= max(probas)
"""
# computes probability intervals "online" - i.e. re-computed after each onlooker
probas = self.compute_probability()
# selects a new potential "onlooker" bee
for index in range(self.size):
if (beta < probas[index]):
return index
def send_scout(self):
"""
5. SEND SCOUT BEE PHASE.
-----------------------
Identifies the bee whose abandonment counter exceeds the preset trials
limit, abandons it and creates a new random bee to explore a new random
area of the domain space.
In real life, after the depletion of a food nectar source, a bee moves
on to other food sources.
By this means, an employed bee which cannot improve its solution
before the abandonment counter reaches the limit of trials becomes a
scout bee. Scout bees thus prevent stagnation of the employed bee
population.
Intuitively, this method provides an easy means to overcome any local
optima within which a bee may have been trapped.
"""
# retrieves the number of trials for all bees
trials = [ self.population[i].counter for i in range(self.size) ]
# identifies the bee with the greatest number of trials
index = trials.index(max(trials))
# checks if its number of trials exceeds the pre-set maximum number of trials
if (trials[index] > self.max_trials):
# creates a new scout bee randomly
self.population[index] = Bee(self.lower, self.upper, self.evaluate)
# sends scout bee to exploit its solution vector
self.send_employee(index)
def _mutate(self, dim, current_bee, other_bee):
"""
Mutates a given solution vector - i.e. for continuous
real-values.
Parameters:
----------
:param int dim : vector's dimension to be mutated
:param int current_bee : index of current bee
:param int other_bee : index of another bee to cross-over
"""
return self.population[current_bee].vector[dim] + \
(random.random() - 0.5) * 2 * \
(self.population[current_bee].vector[dim] - self.population[other_bee].vector[dim])
def _check(self, vector, dim=None):
"""
Checks that a solution vector is contained within the
pre-determined lower and upper bounds of the problem.
"""
if (dim is None):
range_ = range(self.dim)
else:
range_ = [dim]
for i in range_:
# checks lower bound
if (vector[i] < self.lower[i]):
vector[i] = self.lower[i]
# checks upper bound
elif (vector[i] > self.upper[i]):
vector[i] = self.upper[i]
return vector
def _verbose(self, itr, cost):
""" Displays information about computation. """
msg = "# Iter = {} | Best Evaluation Value = {} | Mean Evaluation Value = {} "
print(msg.format(int(itr), cost["best"][itr], cost["mean"][itr]))
# ---- END
| |
import numpy
from six import moves
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.connection import convolution_2d
from chainer import utils
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
import chainerx
class ConvolutionND(function_node.FunctionNode):
def __init__(self, ndim, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
self.ndim = ndim
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
self.cover_all = cover_all
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = groups
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == self.ndim + 2,
w_type.ndim == self.ndim + 2,
# Need to consider the case that group count > 1.
# x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype.kind == 'f',
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_chainerx(self, inputs):
# TODO(hvy): Support mixed precision.
if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
return chainer.Fallback
# TODO(hvy): Support dilate > 1.
if any(d != 1 for d in self.dilate):
return chainer.Fallback
# TODO(hvy): Support groups > 1.
if self.groups > 1:
return chainer.Fallback
if inputs[0].device.backend.name == 'cuda' and (
self.cover_all or self.ndim < 2):
return chainer.Fallback
return chainerx.conv(
*inputs, stride=self.stride, pad=self.pad,
cover_all=self.cover_all),
def _use_cudnn(self, x, W):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == W.dtype
and self.ndim > 1)
def _forward_xp(self, x, W, b, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, W, b, xp)
else:
return self._forward_xp_core(x, W, b, xp)
def _forward_grouped_convolution_xp(self, x, W, b, xp):
# G: group count
# N: batch size
# iC: input channels
# oC: output channels
G = self.groups
N, iC = x.shape[:2]
oC = W.shape[0]
k_size = W.shape[2:]
iCg = iC // G
oCg = oC // G
dims = len(k_size)
if iC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of input channels')
if oC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of output channels')
xp = backend.get_array_module(x)
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
o_size = x.shape[-dims:]
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * utils.size_of_shape(k_size)
x = x.reshape(G, mul_len, N * utils.size_of_shape(o_size))
W = W.reshape(G, oCg, mul_len)
# (G, oCg, N*o_size) = (G, oCg, iCg*k_size) @ (G, iCg*k_size, N*o_size)
y = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
y = y.reshape(oC, N, *o_size)
y = xp.rollaxis(y, 1) # (N, oC, o_size...)
if b is not None:
y += b.reshape(1, b.size, *((1,) * dims))
return y,
def _forward_xp_core(self, x, W, b, xp):
ndim = self.ndim
ksize = W.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
# Make patch array.
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
# Compute correlation.
axes = tuple(moves.range(1, ndim + 2)) # (1, 2, ..., N+1)
y = xp.tensordot(col, W, (axes, axes)).astype(x.dtype, copy=False)
# Apply bias if given.
if b is not None:
y += b
# Roll c_O before the second in (n, y_1, y_2, ..., y_N, c_O).
return xp.rollaxis(y, ndim + 1, 1),
def _forward_cudnn(self, x, W, b):
out_c = W.shape[0] # (c_O, _, k_1, k_2, ..., k_N)
ksize = W.shape[2:]
n, c = x.shape[:2] # (n, c_I, d_1, d_2, ..., d_N)
dims = x.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
groups = self.groups
# Make empty array for result.
outs = tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
for (d, k, s, p, di) in zip(dims, ksize, stride, pad, dilate))
assert all(out > 0 for out in outs), 'Output sizes should be positive.'
y_shape = (n, out_c) + outs # (n, c_O, out_1, out_2, ..., out_N)
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_forward(
x, W, b, y, pad, stride, dilate, groups,
auto_tune=auto_tune, tensor_core=tensor_core)
return y,
def forward(self, inputs):
self.retain_inputs((0, 1)) # retain only x and W
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, W, b, numpy)
elif not self._use_cudnn(x, W):
return self._forward_xp(x, W, b, cuda.cupy)
else:
return self._forward_cudnn(x, W, b)
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, W, stride=self.stride, pad=self.pad, outsize=x_shape,
dilate=self.dilate, groups=self.groups)
ret.append(gx)
if 1 in indexes:
gW, = ConvolutionNDGradW(self).apply((x, gy))
ret.append(gW)
if 2 in indexes:
axis = (0,) + tuple(moves.range(2, gy.ndim))
gb = chainer.functions.sum(gy, axis=axis)
if gb.dtype != self.inputs[2].dtype:
gb = chainer.functions.cast(gb, self.inputs[2].dtype)
ret.append(gb)
return ret
class ConvolutionNDGradW(function_node.FunctionNode):
def __init__(self, convnd):
W_node = convnd.inputs[1]
self.ndim = convnd.ndim
self.ksize = W_node.shape[2:]
self.stride = convnd.stride
self.pad = convnd.pad
self.cover_all = convnd.cover_all
self.dilate = convnd.dilate
self.groups = convnd.groups
self.W_dtype = W_node.dtype
def _use_cudnn(self, x, gy):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == self.W_dtype
and gy.dtype == self.W_dtype
and self.ndim > 1)
def forward(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, gy, numpy)
elif not self._use_cudnn(x, gy):
return self._forward_xp(x, gy, cuda.cupy)
else:
return self._forward_cudnn(x, gy)
def _forward_xp(self, x, gy, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, gy, xp)
else:
return self._forward_xp_core(x, gy, xp)
def _forward_grouped_convolution_xp(self, x, gy, xp):
G = self.groups
N, iC = x.shape[:2]
oC = gy.shape[1]
o_size = gy.shape[2:]
o_size_prod = utils.size_of_shape(o_size)
k_size = self.ksize
dims = len(o_size)
iCg = iC // G
oCg = oC // G
# Do not check iCg and oCg because this class is rarely used alone
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * utils.size_of_shape(k_size)
x = x.reshape(G, mul_len, N * o_size_prod)
x = x.transpose(0, 2, 1) # (G, N*o_size, iCg*k_size)
gy = xp.rollaxis(gy, 1) # (oC, N, o_size...)
gy = gy.reshape(G, oCg, N * o_size_prod)
# (G, oCg, iCg*k_size) = (G, oCg, N*o_size) @ (G, N*o_size, iCg*k_size)
gW = convolution_2d._matmul(gy, x).astype(self.W_dtype, copy=False)
gW = gW.reshape(oC, iCg, *k_size)
return gW,
def _forward_xp_core(self, x, gy, xp):
# Compute filter weight gradient.
# (n, _, out_1, out_2, ..., out_N)
out_axes = (0,) + tuple(moves.range(2, self.ndim + 2))
# (n, _, _, ..., _, out_1, out_2, ..., out_N)
col_axes = (0,) + tuple(moves.range(self.ndim + 2, self.ndim * 2 + 2))
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (xp is numpy and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
gW = xp.tensordot(gy, col, (out_axes, col_axes)).astype(
self.W_dtype, copy=False)
return gW,
def _forward_cudnn(self, x, gy):
# Make empty arrays for result.
out_c = gy.shape[1]
in_c = x.shape[1] // self.groups
gW = cuda.cupy.empty(
(out_c, in_c) + self.ksize, dtype=self.W_dtype)
# Compute
pad = self.pad
stride = self.stride
dilate = self.dilate
groups = self.groups
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_backward_filter(
x, gy, gW, pad, stride, dilate, groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core)
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, ggW, stride=self.stride, pad=self.pad, outsize=x_shape,
groups=self.groups, dilate=self.dilate)
ret.append(gx)
if 1 in indexes:
ggy = convolution_nd(
x, ggW, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, groups=self.groups,
dilate=self.dilate)
ret.append(ggy)
return ret
def convolution_nd(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""N-dimensional convolution function.
This is an implementation of N-dimensional convolution which is generalized
two-dimensional convolution in ConvNets. It takes three variables: the
input ``x``, the filter weight ``W`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``convolution_nd`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
If ``cover_all`` option is ``True``, the filter will cover the all
spatial locations. So, if the last stride of filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size is determined by
the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
One-dimensional bias variable with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
`cover_all` needs to be ``False`` if you want to use cuDNN.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
.. note::
This function uses cuDNN implementation for its forward and backward
computation if ALL of the following conditions are satisfied:
- ``cuda.cudnn_enabled`` is ``True``
- ``chainer.config.use_cudnn`` is ``'always'`` or ``'auto'``
- The number of spatial dimensions is more than one.
- ``cover_all`` is ``False``
- The input's ``dtype`` is equal to the filter weight's.
- The ``dtype`` is FP16, FP32 or FP64. (FP16 is only available when
cuDNN version :math:`\\geq` v3.)
Convolution links can use a feature of cuDNN called autotuning, which
selects the most efficient CNN algorithm for images of fixed-size,
and can provide a significant performance boost for fixed neural nets.
To enable, set `chainer.using_config('autotune', True)`
.. seealso::
:class:`~chainer.links.ConvolutionND` to manage the model parameters
``W`` and ``b``.
.. seealso:: :func:`convolution_2d`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> W = np.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(1, 3, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
"""
ndim = len(x.shape[2:])
fnode = ConvolutionND(
ndim, stride, pad, cover_all, dilate=dilate, groups=groups)
args = (x, W) if b is None else (x, W, b)
y, = fnode.apply(args)
return y
def convolution_1d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""1-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
def convolution_3d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""3-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
| |
# This module provides classes and procedures for converting the JSON example we have been provided with into
# Python classes.
# We only have an example of a create account request so far; to process one, call JSONtoTickets(JSONData),
# where JSONData is a string containing an account request.
# Also includes fairly dodgy __str__ methods for testing purposes.
# Owain Kenway
# For whatever reason, the request is enclosed in a SysAdmin object.
class SysAdmin:
known_keys = ["Id", "Type", "Status", "StartDate", "EndDate", "Machine", "Handler", "Approver", "Person", "ProjectGroup", "Project", "Account", "ExtraText"]
def __init__(self, SysAdminDict):
for a in SysAdminDict.keys():
if a not in self.known_keys:
print("Warning [SysAdmin]: Detected unknown key: " + a + ": " + str(SysAdminDict[a]))
self.Id=SysAdminDict["Id"]
self.Type=SysAdminDict["Type"]
self.Status=SysAdminDict["Status"]
self.StartDate=SysAdminDict["StartDate"]
self.EndDate=SysAdminDict["EndDate"]
self.Machine=SysAdminDict["Machine"]
self.HandlerName=SysAdminDict["Handler"]["Name"]
self.HandlerEmail=SysAdminDict["Handler"]["Email"]
self.Approver=""
# Approver is an instance of Person
if "Approver" in SysAdminDict.keys():
self.Approver=Person(SysAdminDict["Approver"])
self.Person=""
if "Person" in SysAdminDict.keys():
self.Person=Person(SysAdminDict["Person"])
self.ProjectGroup=ProjectGroup(SysAdminDict["ProjectGroup"])
self.Project=Project(SysAdminDict["Project"])
self.Account=""
if "Account" in SysAdminDict.keys():
self.Account=Account(SysAdminDict["Account"])
self.ExtraText=""
if "ExtraText" in SysAdminDict.keys():
self.ExtraText=SysAdminDict["ExtraText"]
def __str__(self):
return "SysAdmin: " + ",\n".join([self.Id,
self.Type,
self.Status,
self.StartDate,
self.EndDate,
self.HandlerName,
self.HandlerEmail,
str(self.Approver),
str(self.Person),
str(self.Project),
str(self.ProjectGroup),
str(self.Account),
self.Machine,
self.ExtraText])
class Project:
known_keys = ["Code", "Name", "Status", "ProjectClass", "FundingBody", "Machines", "TopGroup"]
def __init__(self, ProjectDict):
for a in ProjectDict.keys():
if a not in self.known_keys:
print("Warning [Project]: Detected unknown key: " + a + ": " + str(ProjectDict[a]))
self.Code=ProjectDict["Code"]
self.Name=ProjectDict["Name"]
self.Status=ProjectDict["Status"]
self.ProjectClass=ProjectDict["ProjectClass"]
self.FundingBody=ProjectDict["FundingBody"]
self.Machines=str.split(ProjectDict["Machines"], ",") # Comma sep list
self.TopGroup=ProjectGroup(ProjectDict["TopGroup"])
def __str__(self):
return "Project: " + ",\n".join([self.Code,
self.Name,
self.Status,
self.ProjectClass,
self.FundingBody,
str(self.Machines),
str(self.TopGroup)])
class ProjectGroup:
known_keys = ["Code", "GroupID"]
def __init__(self, ProjectGroupDict):
for a in ProjectGroupDict.keys():
if a not in self.known_keys:
print("Warning [ProjectGroup]: Detected unknown key: " + a + ": " + str(ProjectGroupDict[a]))
self.Code=ProjectGroupDict["Code"]
self.GroupID=ProjectGroupDict["GroupID"]
def __str__(self):
return "ProjectGroup: " + ",\n".join([self.Code,
self.GroupID])
class Account:
known_keys = ["Name", "GID", "Groups", "Person", "UID", "Machines"]
def __init__(self, AccountDict):
for a in AccountDict.keys():
if (a not in self.known_keys) and (not a.startswith("Group")):
print("Warning [Account]: Detected unknown key: " + a + ": " + str(AccountDict[a]))
self.Name=AccountDict["Name"]
self.GroupID=AccountDict["GID"]
# Need code for parsing group fmt
self.Groups=[]
for a in AccountDict.keys():
if (a.startswith("Group") and a != "Groups"):
self.Groups.append(ProjectGroup(AccountDict[a]))
self.Person=Person(AccountDict["Person"])
self.UserID=AccountDict["UID"]
self.Machines=str.split(AccountDict["Machines"], ",")
def __str__(self):
strGroups = "Groups: "
for a in self.Groups:
strGroups += str(a) + ",\n"
return "Account: " + ",\n".join([self.Name,
self.GroupID,
str(self.Person),
strGroups,
self.UserID,
str(self.Machines)])
class Person:
known_keys = ["Name", "Email", "WebName", "PublicKey", "NormalisedPublicKey", "HartreeName"]
def __init__(self, PersonDict):
for a in PersonDict.keys():
if a not in self.known_keys:
print("Warning [Person]: Detected unknown key: " + a + ": " + str(PersonDict[a]))
self.Title=PersonDict["Name"]["Title"]
if self.Title is None:
self.Title = ""
self.FirstName=PersonDict["Name"]["Firstname"]
self.LastName=PersonDict["Name"]["Lastname"]
self.Email=PersonDict["Email"]
self.WebName=""
if "WebName" in PersonDict.keys():
self.WebName=PersonDict["WebName"]
self.PublicKey=""
if "PublicKey" in PersonDict.keys():
self.PublicKey=PersonDict["PublicKey"]
self.NormalisedPublicKey=""
if "NormalisedPublicKey" in PersonDict.keys():
self.NormalisedPublicKey=PersonDict["NormalisedPublicKey"]
self.HartreeName=""
if "HartreeName" in PersonDict.keys():
self.HartreeName=PersonDict["HartreeName"]
def __str__(self):
return "Person: " + ",\n".join([
self.Title,
self.FirstName,
self.LastName,
self.Email,
self.WebName,
self.PublicKey,
self.NormalisedPublicKey,
self.HartreeName])
class AccountRequest:
def __init__(self, SystemTicketDict):
self.Ticket=SysAdmin(SystemTicketDict["SysAdmin"])
# Convert String to objects.
def JSONtoTickets(JSONData):
import json
jsdata = json.loads(JSONData)
return JSONDataToTickets(jsdata)
# Convert JSON data structure to a list of objects
def JSONDataToTickets(JSONData):
Tickets = []
if type(JSONData) == list:
for a in JSONData:
Tickets.append(AccountRequest(a))
else:
Tickets.append(AccountRequest(JSONData))
return Tickets
# If this is run directly, process test.json in the current working directory and print the output as a string.
if __name__=="__main__":
import json
import sys
filename="test.json"
if len(sys.argv) > 1:
filename=sys.argv[1]
with open(filename, 'r') as f:
    jdata = f.read()
ar = JSONtoTickets(jdata)
for a in ar:
print(str(a.Ticket))
print("Number of tickets included: " + str(len(ar)))
| |
from questionnaire.models import Questionnaire, Section, SubSection, Question, QuestionGroupOrder, QuestionOption
from questionnaire.models.question_groups import QuestionGroup
from questionnaire.tests.base_test import BaseTest
class QuestionGroupTest(BaseTest):
def setUp(self):
self.question = Question.objects.create(text='Uganda Revision 2014 what what?', UID='abc123', answer_type='Text')
self.questionnaire = Questionnaire.objects.create(name="Uganda Revision 2014", description="some description")
self.section = Section.objects.create(title="Immunisation Coverage", order=1, questionnaire=self.questionnaire)
self.sub_section = SubSection.objects.create(title="Immunisation Extra Coverage", order=1, section=self.section)
self.parent_question_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
self.sub_grouped_question = QuestionGroup.objects.create(subsection=self.sub_section, parent=self.parent_question_group, order=2)
self.sub_grouped_question.question.add(self.question)
def test_grouped_questions_field(self):
grouped_question = QuestionGroup()
fields = [str(item.attname) for item in grouped_question._meta.fields]
self.assertEqual(11, len(fields))
for field in ['id', 'created', 'modified','subsection_id', 'name', 'instructions', 'parent_id', 'order', 'grid',
'display_all', 'allow_multiples']:
self.assertIn(field, fields)
def test_grouped_questions_store(self):
grouped_question = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
grouped_question.question.add(self.question)
self.assertTrue(grouped_question.id)
self.assertEqual(1, grouped_question.order)
self.assertEqual(self.sub_section, grouped_question.subsection)
all_questions = grouped_question.question.all()
self.assertEqual(1, all_questions.count())
self.assertEqual(self.question, all_questions[0])
self.assertIsNone(grouped_question.name)
self.assertIsNone(grouped_question.instructions)
def test_grouped_questions_store_parent(self):
self.assertEqual(self.parent_question_group, self.sub_grouped_question.parent)
self.assertTrue(self.sub_grouped_question.id)
def test_grouped_question_can_get_its_questions(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, name="Laboratory Investigation")
question = Question.objects.create(text='Uganda Revision 2014 what what?', UID='ab3123', answer_type='Text')
sub_group.question.add(self.question, question)
self.assertEqual(2, len(sub_group.all_questions()))
self.assertIn(self.question, sub_group.all_questions())
self.assertIn(question, sub_group.all_questions())
def test_knows_its_subgroups(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
sub_group2 = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
other_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
sub_groups = self.parent_question_group.sub_groups()
self.assertEqual(3, len(sub_groups))
self.assertIn(sub_group, sub_groups)
self.assertIn(sub_group2, sub_groups)
self.assertIn(self.sub_grouped_question, sub_groups)
self.assertNotIn(other_group, sub_groups)
def test_knows_all_questions_even_those_of_subgroups(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
known_questions = self.parent_question_group.and_sub_group_questions()
self.assertEqual(3, len(known_questions))
self.assertIn(self.question, known_questions)
self.assertIn(question, known_questions)
self.assertIn(question2, known_questions)
def test_parent_group_ordered_questions(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
ordered_questions_including_those_of_sub_groups = self.parent_question_group.ordered_questions()
self.assertEqual(3, len(ordered_questions_including_those_of_sub_groups))
self.assertEqual(self.question, ordered_questions_including_those_of_sub_groups[0])
self.assertEqual(question, ordered_questions_including_those_of_sub_groups[1])
self.assertEqual(question2, ordered_questions_including_those_of_sub_groups[2])
def test_subgroups_ordered_questions(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
sub_group_ordered_questions = sub_group.ordered_questions()
self.assertEqual(2, len(sub_group_ordered_questions))
self.assertEqual(question, sub_group_ordered_questions[0])
self.assertEqual(question2, sub_group_ordered_questions[1])
def test_parent_group_question_orders(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
order1 = QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
order2 = QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
order3 = QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
orders_of_questions_including_those_of_sub_groups = self.parent_question_group.question_orders()
self.assertEqual(3, len(orders_of_questions_including_those_of_sub_groups))
self.assertEqual(order1, orders_of_questions_including_those_of_sub_groups[0])
self.assertEqual(order2, orders_of_questions_including_those_of_sub_groups[1])
self.assertEqual(order3, orders_of_questions_including_those_of_sub_groups[2])
def test_subgroups_question_orders(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
order1 = QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
order2 = QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
order3 = QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
sub_group_question_orders = sub_group.question_orders()
self.assertEqual(2, len(sub_group_question_orders))
self.assertEqual(order2, sub_group_question_orders[0])
self.assertEqual(order3, sub_group_question_orders[1])
def test_group_knows_if_it_has_more_than_one_question(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text')
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
order1 = QuestionGroupOrder.objects.create(question=self.question, question_group=self.parent_question_group, order=1)
order2 = QuestionGroupOrder.objects.create(question=question, question_group=self.parent_question_group, order=2)
order3 = QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=3)
self.assertTrue(sub_group.has_at_least_two_questions())
self.assertFalse(self.sub_grouped_question.has_at_least_two_questions())
def test_group_knows_its_primary_question(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text', is_primary=True)
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
        self.assertEqual(1, sub_group.primary_question().count())
        self.assertIn(question, sub_group.primary_question())
def test_group_knows_its_non_primary_questions(self):
question1 = Question.objects.create(text='question', UID='ab3123', answer_type='Text', is_primary=True)
question2 = Question.objects.create(text='question1', UID='c00w01', answer_type='Text')
question3 = Question.objects.create(text='question2', UID='c00s01', answer_type='Text')
question4 = Question.objects.create(text='question3', UID='c00a01', answer_type='Text')
self.parent_question_group.question.add(question1, question2, question3, question4)
order2 = QuestionGroupOrder.objects.create(question=question1, question_group=self.parent_question_group, order=1)
order3 = QuestionGroupOrder.objects.create(question=question2, question_group=self.parent_question_group, order=2)
order4 = QuestionGroupOrder.objects.create(question=question3, question_group=self.parent_question_group, order=3)
order5 = QuestionGroupOrder.objects.create(question=question4, question_group=self.parent_question_group, order=4)
self.assertEqual(3, len(self.parent_question_group.all_non_primary_questions()))
self.assertNotIn(question1, self.parent_question_group.all_non_primary_questions())
        for non_primary in [question2, question3, question4]:
            self.assertIn(non_primary, self.parent_question_group.all_non_primary_questions())
def test_group_knows_maximum_order_of_its_questions(self):
self.assertEqual(0, self.parent_question_group.max_questions_order())
some_arbitrary_order = 20
self.question.orders.create(question_group=self.parent_question_group, order=some_arbitrary_order)
self.assertEqual(some_arbitrary_order, self.parent_question_group.max_questions_order())
def test_group_knows_if_it_has_sub_groups(self):
self.assertTrue(self.parent_question_group.has_subgroups())
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, parent=self.parent_question_group)
self.assertFalse(sub_group.has_subgroups())
def test_group_knows_its_questions_orders(self):
sub_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
question = Question.objects.create(text='question', UID='ab3123', answer_type='Text', is_primary=True)
question2 = Question.objects.create(text='question2', UID='c00001', answer_type='Text')
sub_group.question.add(question, question2)
order2 = QuestionGroupOrder.objects.create(question=question, question_group=sub_group, order=1)
order3 = QuestionGroupOrder.objects.create(question=question2, question_group=sub_group, order=2)
self.assertEqual(2, len(sub_group.get_orders()))
self.assertIn(order2, sub_group.get_orders())
self.assertIn(order3, sub_group.get_orders())
def test_group_adds_orders_for_its_primary_question_times_its_number_of_its_options(self):
question_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, grid=True, display_all=True)
question1 = Question.objects.create(text='Favorite beer 1', UID='C00001', answer_type='MultiChoice', is_primary=True)
option1 = QuestionOption.objects.create(text='tusker lager', question=question1)
option2 = QuestionOption.objects.create(text='tusker lager1', question=question1)
option3 = QuestionOption.objects.create(text='tusker lager2', question=question1)
question2 = Question.objects.create(text='question 2', instructions="instruction 2",
UID='C00002', answer_type='Text')
question3 = Question.objects.create(text='question 3', instructions="instruction 3",
UID='C00003', answer_type='Number')
question4 = Question.objects.create(text='question 4', instructions="instruction 2",
UID='C00005', answer_type='Date')
question_group.question.add(question1, question3, question2, question4)
order1 = QuestionGroupOrder.objects.create(question=question1, question_group=question_group, order=1)
order2 = QuestionGroupOrder.objects.create(question=question2, question_group=question_group, order=2)
order3 = QuestionGroupOrder.objects.create(question=question3, question_group=question_group, order=3)
order4 = QuestionGroupOrder.objects.create(question=question4, question_group=question_group, order=4)
self.assertEqual(12, len(question_group.get_orders()))
self.assertIn(order1, question_group.get_orders())
self.assertIn(order2, question_group.get_orders())
self.assertIn(order3, question_group.get_orders())
self.assertIn(order4, question_group.get_orders())
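# The tests above pin down the QuestionGroup helper API without showing its
# implementation. The sketch below illustrates how sub_groups(),
# and_sub_group_questions() and has_at_least_two_questions() could satisfy
# these assertions; it is inferred from the tests, not the project's actual
# model code.
#
#     class QuestionGroup(models.Model):
#         ...
#         def sub_groups(self):
#             return list(QuestionGroup.objects.filter(parent=self))
#
#         def and_sub_group_questions(self):
#             questions = list(self.question.all())
#             for group in self.sub_groups():
#                 questions.extend(group.question.all())
#             return questions
#
#         def has_at_least_two_questions(self):
#             return self.question.count() >= 2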
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import strutils
import six
from six.moves import http_client
from six.moves.urllib import parse
from ironicclient.common.apiclient import exceptions
from ironicclient.common.i18n import _
def getid(obj):
"""Return id if argument is a Resource.
    Abstracts the common pattern of allowing either an object or an
    object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
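# Example usage of getid(); FakeNode is illustrative, not part of this
# module:
#
#     class FakeNode(object):
#         uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
#
#     getid(FakeNode())                              # -> the uuid above
#     getid('1be26c0b-03f2-4d2e-ae87-c02d7f33c123')  # -> unchanged string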
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
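# Example of registering and running a hook; the hook function is
# illustrative, while '__pre_parse_args__' is one of the hook types used by
# Extension below:
#
#     def log_args(*args, **kwargs):
#         print('pre-parse hook called with %s %s' % (args, kwargs))
#
#     HookableMixin.add_hook('__pre_parse_args__', log_args)
#     HookableMixin.run_hooks('__pre_parse_args__', 'arg1', flag=True)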
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None, the whole response
            body will be used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response
            body will be used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == http_client.NO_CONTENT
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            POST request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response
            body will be used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            PUT request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response
            body will be used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            PATCH request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response
            body will be used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
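# Example (sketch): given a concrete ManagerWithFind subclass whose list()
# returns resources with a `name` attribute, lookups behave like:
#
#     manager.findall(name='node-1')   # -> list of matches, possibly empty
#     manager.find(name='node-1')      # -> exactly one match, or raises
#                                      #    NotFound / NoUniqueMatch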
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
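    # Example: for a hypothetical subclass with collection_key = 'entities'
    # and key = 'entity', build_url() yields:
    #
    #     mgr.build_url()                    # -> '/entities'
    #     mgr.build_url(entity_id='abc123')  # -> '/entities/abc123'
    #     mgr.build_url(base_url='/v1')      # -> '/v1/entities'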
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in kwargs.copy().items():
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num > 1:
            raise exceptions.NoUniqueMatch()
else:
return rl[0]
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion."""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in info.items():
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
        Some clients, such as novaclient, have the option to lazy load
        details; those details can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
self._add_details(
{'x_request_id': self.manager.client.last_request_id})
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
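# Example: constructing a Resource directly from a response dict (managers
# normally do this for you via _list()/_get()):
#
#     res = Resource(manager=None, info={'id': '42', 'name': 'node-a'},
#                    loaded=True)
#     res.id         # -> '42'
#     res.human_id   # -> None (HUMAN_ID is False on the base class)
#     res.to_dict()  # -> deep copy of {'id': '42', 'name': 'node-a'}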
| |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# Copyright 2013 Freescale, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Seetharama S. Ayyadevara, Freescale, Inc.
import logging
import sys
import time
import eventlet
from sqlalchemy.ext import sqlsoup
from quantum.agent import rpc as agent_rpc
from quantum.agent.linux import ip_lib
from quantum.agent.linux import ovs_lib
from quantum.agent.linux import utils
from quantum.common import constants as q_const
from quantum.common import config as logging_config
from quantum.common import topics
from quantum.common import utils as q_utils
from quantum.openstack.common import cfg
from quantum.openstack.common import context
from quantum.openstack.common import rpc
from quantum.openstack.common.rpc import dispatcher
from quantum.plugins.openvswitch.common import config
from quantum.plugins.openvswitch.common import constants
logging.basicConfig()
LOG = logging.getLogger(__name__)
# A placeholder for dead vlans.
DEAD_VLAN_TAG = "4095"
# A mapping of a local VLAN to the virtual network it realizes, including
# the VIF ports (i.e., ports that have 'iface-id' and 'vif-mac' attributes
# set) attached to it.
class LocalVLANMapping:
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
class Port(object):
"""Represents a quantum port.
    Class stores port data in an ORM-free way, so attributes are
    still available even if a row has been deleted.
"""
def __init__(self, p):
self.id = p.id
self.network_id = p.network_id
self.device_id = p.device_id
self.admin_state_up = p.admin_state_up
self.status = p.status
def __eq__(self, other):
'''Compare only fields that will cause us to re-wire.'''
try:
return (self and other
and self.id == other.id
and self.admin_state_up == other.admin_state_up)
        except Exception:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
class OVSQuantumAgent(object):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
    (LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
    For each virtual network realized as a VLAN or flat network, a
veth is used to connect the local VLAN on the integration bridge
with the physical network bridge, with flow rules adding,
modifying, or stripping VLAN tags as necessary.
'''
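    # A rough sketch of the plumbing described above; bridge names shown are
    # the usual defaults, the actual names come from the agent's
    # configuration:
    #
    #     VM VIFs --> br-int (integration bridge, local VLANs)
    #                   |-- patch ports --> br-tun (GRE/VXLAN tunnels)
    #                   |-- veth pairs  --> per-physical-network bridges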
# Lower bound on available vlans.
MIN_VLAN_TAG = 1
# Upper bound on available vlans.
MAX_VLAN_TAG = 4094
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self, integ_br, tun_br, local_ip,
bridge_mappings, root_helper,
polling_interval, reconnect_interval, rpc,
enable_tunneling, tenant_network_type,
mcast_ip, vxlan_udp_port, mcast_routing_interface):
'''Constructor.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to poll DB.
        :param reconnect_interval: retry interval (secs) on DB error.
:param rpc: if True use RPC interface to interface with plugin.
:param enable_tunneling: if True enable GRE or VxLAN networks.
:param tenant_network_type: network type -- flat, vlan, gre, vxlan
:param mcast_ip: multicast ip address for VXLAN tunnels.
:param vxlan_udp_port: UDP port to listen on for VXLAN traffic.
:param mcast_routing_interface: Interface to add multicast route.
'''
self.root_helper = root_helper
        self.available_local_vlans = set(
            xrange(OVSQuantumAgent.MIN_VLAN_TAG,
                   OVSQuantumAgent.MAX_VLAN_TAG + 1))
self.setup_integration_br(integ_br)
self.setup_physical_bridges(bridge_mappings)
self.local_vlan_map = {}
self.polling_interval = polling_interval
self.reconnect_interval = reconnect_interval
self.enable_tunneling = enable_tunneling
self.local_ip = local_ip
self.tunnel_count = 0
self.tenant_network_type = tenant_network_type
self.mcast_ip = mcast_ip
self.vxlan_udp_port = vxlan_udp_port
self.mcast_routing_interface = mcast_routing_interface
if self.enable_tunneling:
self.setup_tunnel_br(tun_br)
if self.tenant_network_type == constants.TYPE_VXLAN \
and self.mcast_ip:
ipdev = ip_lib.IPDevice(self.mcast_routing_interface,
self.root_helper)
ipdev.route.delete_host_dev(self.mcast_ip)
ipdev.route.add_host_dev(self.mcast_ip)
self.rpc = rpc
if rpc:
self.setup_rpc(integ_br)
def setup_rpc(self, integ_br):
mac = utils.get_interface_mac(integ_br)
self.agent_id = '%s%s' % ('ovs', (mac.replace(":", "")))
self.topic = topics.AGENT
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
# RPC network init
self.context = context.RequestContext('quantum', 'quantum',
is_admin=False)
# Handle updates from service
self.dispatcher = self.create_rpc_dispatcher()
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[constants.TUNNEL, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.dispatcher,
self.topic,
consumers)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in self.local_vlan_map.iteritems():
if vif_id in vlan_mapping.vif_ports:
return network_id
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
network_type = kwargs.get('network_type')
LOG.debug("Delete %s", network_id)
# The network may not be defined on this agent
lvm = self.local_vlan_map.get(network_id)
if lvm:
self.reclaim_local_vlan(network_id, lvm)
else:
LOG.debug("Network %s not used on agent.", network_id)
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
network_type = kwargs.get('network_type')
segmentation_id = kwargs.get('segmentation_id')
physical_network = kwargs.get('physical_network')
vif_port = self.int_br.get_vif_port_by_id(port['id'])
self.treat_vif_port(vif_port, port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
def tunnel_update(self, context, **kwargs):
LOG.debug("tunnel_update received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_id = kwargs.get('tunnel_id')
if tunnel_ip == self.local_ip:
return
tun_name = 'gre-%s' % tunnel_id
self.tun_br.add_gre_tunnel_port(tun_name, tunnel_ip)
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return dispatcher.RpcDispatcher([self])
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id):
'''Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
if not self.available_local_vlans:
LOG.error("No local VLAN available for net-id=%s", net_uuid)
return
lvid = self.available_local_vlans.pop()
LOG.info("Assigning %s as local vlan for net-id=%s", lvid, net_uuid)
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type,
physical_network,
segmentation_id)
if network_type == constants.TYPE_GRE:
if self.enable_tunneling:
# outbound
self.tun_br.add_flow(priority=4, in_port=self.patch_int_ofport,
dl_vlan=lvid,
actions="set_tunnel:%s,normal" %
segmentation_id)
# inbound bcast/mcast
self.tun_br.add_flow(priority=3, tun_id=segmentation_id,
dl_dst=
"01:00:00:00:00:00/01:00:00:00:00:00",
actions="mod_vlan_vid:%s,output:%s" %
(lvid, self.patch_int_ofport))
else:
LOG.error("Cannot provision GRE network for net-id=%s "
"- tunneling disabled", net_uuid)
elif network_type == constants.TYPE_VXLAN:
if self.enable_tunneling:
                LOG.debug("Ensuring VXLAN tunnel port exists for "
                          "segmentation_id=%s", segmentation_id)
if not self.tun_br.get_vxlan_ofport(segmentation_id):
self.tun_br.add_vxlan_tunnel_port(segmentation_id,
self.mcast_ip,
self.vxlan_udp_port)
vxlan_ofport = self.tun_br.get_vxlan_ofport(segmentation_id)
if vxlan_ofport:
# outbound
self.tun_br.add_flow(priority=4,
in_port=self.patch_int_ofport,
dl_vlan=lvid,
actions="strip_vlan,output:%s"
% vxlan_ofport)
# inbound bcast/mcast
self.tun_br.add_flow(priority=3, tun_id=segmentation_id,
dl_dst=
"01:00:00:00:00:00/01:00:00:00:00:00",
actions="mod_vlan_vid:%s,output:%s" %
(lvid, self.patch_int_ofport))
else:
LOG.error("Cannot find VXLAN port 'vxlan-%d' "
"network for net-id=%s ", net_uuid,
segmentation_id)
else:
LOG.error("Cannot provision VXLAN network for net-id=%s "
"- tunneling disabled", net_uuid)
elif network_type == constants.TYPE_FLAT:
if physical_network in self.phys_brs:
# outbound
br = self.phys_brs[physical_network]
br.add_flow(priority=4,
in_port=self.phys_ofports[physical_network],
dl_vlan=lvid,
actions="strip_vlan,normal")
# inbound
self.int_br.add_flow(priority=3,
in_port=
self.int_ofports[physical_network],
dl_vlan=0xffff,
actions="mod_vlan_vid:%s,normal" % lvid)
else:
LOG.error("Cannot provision flat network for net-id=%s "
"- no bridge for physical_network %s", net_uuid,
physical_network)
elif network_type == constants.TYPE_VLAN:
if physical_network in self.phys_brs:
# outbound
br = self.phys_brs[physical_network]
br.add_flow(priority=4,
in_port=self.phys_ofports[physical_network],
dl_vlan=lvid,
actions="mod_vlan_vid:%s,normal" % segmentation_id)
# inbound
self.int_br.add_flow(priority=3,
in_port=self.
int_ofports[physical_network],
dl_vlan=segmentation_id,
actions="mod_vlan_vid:%s,normal" % lvid)
else:
LOG.error("Cannot provision VLAN network for net-id=%s "
"- no bridge for physical_network %s", net_uuid,
physical_network)
elif network_type == constants.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error("Cannot provision unknown network type %s for "
"net-id=%s", network_type, net_uuid)
def reclaim_local_vlan(self, net_uuid, lvm):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
:param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id,
vif_ids) mapping.'''
LOG.info("Reclaiming vlan = %s from net-id = %s", lvm.vlan, net_uuid)
if lvm.network_type in [constants.TYPE_GRE, constants.TYPE_VXLAN]:
if self.enable_tunneling:
self.tun_br.delete_flows(tun_id=lvm.segmentation_id)
self.tun_br.delete_flows(dl_vlan=lvm.vlan)
if lvm.network_type == constants.TYPE_VXLAN:
self.tun_br.delete_vxlan_tunnel_port(lvm.segmentation_id)
elif lvm.network_type == constants.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.delete_flows(in_port=self.phys_ofports[lvm.
physical_network],
dl_vlan=lvm.vlan)
# inbound
br = self.int_br
br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
dl_vlan=0xffff)
elif lvm.network_type == constants.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.delete_flows(in_port=self.phys_ofports[lvm.
physical_network],
dl_vlan=lvm.vlan)
# inbound
br = self.int_br
br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
dl_vlan=lvm.segmentation_id)
elif lvm.network_type == constants.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error("Cannot reclaim unknown network type %s for net-id=%s",
lvm.network_type, net_uuid)
del self.local_vlan_map[net_uuid]
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network, segmentation_id):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
if net_uuid not in self.local_vlan_map:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
if self.enable_tunneling:
if network_type == constants.TYPE_GRE:
# inbound unicast
self.tun_br.add_flow(priority=3, tun_id=segmentation_id,
dl_dst=port.vif_mac,
actions="mod_vlan_vid:%s,normal" %
lvm.vlan)
if network_type == constants.TYPE_VXLAN:
# inbound unicast
action_str= "mod_vlan_vid:%s,output:%s" % \
(lvm.vlan, self.patch_int_ofport)
self.tun_br.add_flow(priority=3, tun_id=segmentation_id,
dl_dst=port.vif_mac,
actions=action_str)
self.int_br.set_db_attribute("Port", port.port_name, "tag",
str(lvm.vlan))
if int(port.ofport) != -1:
self.int_br.delete_flows(in_port=port.ofport)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info('port_unbound() net_uuid %s not in local_vlan_map',
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
        if lvm.network_type in [constants.TYPE_GRE, constants.TYPE_VXLAN]:
if self.enable_tunneling:
# remove inbound unicast flow
self.tun_br.delete_flows(tun_id=lvm.segmentation_id,
dl_dst=lvm.vif_ports[vif_id].vif_mac)
if vif_id in lvm.vif_ports:
del lvm.vif_ports[vif_id]
else:
LOG.info('port_unbound: vif_id %s not in local_vlan_map', vif_id)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid, lvm)
def port_dead(self, port):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.'''
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG)
self.int_br.add_flow(priority=2, in_port=port.ofport, actions="drop")
def setup_integration_br(self, integ_br):
'''Setup the integration bridge.
Create patch ports and remove all existing flows.
:param integ_br: the name of the integration bridge.'''
self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper)
self.int_br.delete_port("patch-tun")
self.int_br.remove_all_flows()
# switch all traffic using L2 learning
self.int_br.add_flow(priority=1, actions="normal")
def setup_tunnel_br(self, tun_br):
'''Setup the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br: the name of the tunnel bridge.'''
self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper)
self.tun_br.reset_bridge()
self.patch_tun_ofport = self.int_br.add_patch_port("patch-tun",
"patch-int")
self.patch_int_ofport = self.tun_br.add_patch_port("patch-int",
"patch-tun")
if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0:
LOG.error("Failed to create OVS patch port. Cannot have tunneling "
"enabled on this agent, since this version of OVS does "
"not support tunnels or patch ports. "
"Agent terminated!")
exit(1)
self.tun_br.remove_all_flows()
self.tun_br.add_flow(priority=1, actions="drop")
def setup_physical_bridges(self, bridge_mappings):
'''Setup the physical network bridges.
Creates physical network bridges and links them to the
integration bridge using veths.
:param bridge_mappings: map physical network names to bridge names.'''
self.phys_brs = {}
self.int_ofports = {}
self.phys_ofports = {}
ip_wrapper = ip_lib.IPWrapper(self.root_helper)
for physical_network, bridge in bridge_mappings.iteritems():
# setup physical bridge
if not ip_lib.device_exists(bridge, self.root_helper):
LOG.error("Bridge %s for physical network %s does not exist. "
"Agent terminated!",
bridge, physical_network)
sys.exit(1)
br = ovs_lib.OVSBridge(bridge, self.root_helper)
br.remove_all_flows()
br.add_flow(priority=1, actions="normal")
self.phys_brs[physical_network] = br
# create veth to patch physical bridge with integration bridge
int_veth_name = constants.VETH_INTEGRATION_PREFIX + bridge
self.int_br.delete_port(int_veth_name)
phys_veth_name = constants.VETH_PHYSICAL_PREFIX + bridge
br.delete_port(phys_veth_name)
if ip_lib.device_exists(int_veth_name, self.root_helper):
ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete()
int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name,
phys_veth_name)
self.int_ofports[physical_network] = self.int_br.add_port(int_veth)
self.phys_ofports[physical_network] = br.add_port(phys_veth)
# block all untranslated traffic over veth between bridges
self.int_br.add_flow(priority=2,
in_port=self.int_ofports[physical_network],
actions="drop")
br.add_flow(priority=2,
in_port=self.phys_ofports[physical_network],
actions="drop")
# enable veth to pass traffic
int_veth.link.set_up()
phys_veth.link.set_up()
def manage_tunnels(self, tunnel_ips, old_tunnel_ips, db):
if self.local_ip in tunnel_ips:
tunnel_ips.remove(self.local_ip)
else:
db.ovs_tunnel_ips.insert(ip_address=self.local_ip)
new_tunnel_ips = tunnel_ips - old_tunnel_ips
if new_tunnel_ips:
LOG.info("Adding GRE tunnels to: %s", new_tunnel_ips)
for ip in new_tunnel_ips:
tun_name = "gre-" + str(self.tunnel_count)
self.tun_br.add_gre_tunnel_port(tun_name, ip)
self.tunnel_count += 1
def rollback_until_success(self, db):
while True:
time.sleep(self.reconnect_interval)
try:
db.rollback()
break
            except Exception:
LOG.exception("Problem connecting to database")
def db_loop(self, db_connection_url):
'''Main processing loop for Tunneling Agent.
:param options: database information - in the event need to reconnect
'''
old_local_bindings = {}
old_vif_ports = {}
old_tunnel_ips = set()
db = sqlsoup.SqlSoup(db_connection_url)
LOG.info("Connecting to database \"%s\" on %s",
db.engine.url.database, db.engine.url.host)
while True:
try:
all_bindings = dict((p.id, Port(p))
for p in db.ports.all())
all_bindings_vif_port_ids = set(all_bindings)
net_bindings = dict((bind.network_id, bind)
for bind in
db.ovs_network_bindings.all())
if self.enable_tunneling:
tunnel_ips = set(x.ip_address for x in
db.ovs_tunnel_ips.all())
if self.tenant_network_type == constants.TYPE_GRE:
self.manage_tunnels(tunnel_ips, old_tunnel_ips, db)
# Get bindings from OVS bridge.
vif_ports = self.int_br.get_vif_ports()
new_vif_ports = dict([(p.vif_id, p) for p in vif_ports])
new_vif_ports_ids = set(new_vif_ports.keys())
old_vif_ports_ids = set(old_vif_ports.keys())
dead_vif_ports_ids = (new_vif_ports_ids -
all_bindings_vif_port_ids)
dead_vif_ports = [new_vif_ports[p] for p in dead_vif_ports_ids]
disappeared_vif_ports_ids = (old_vif_ports_ids -
new_vif_ports_ids)
new_local_bindings_ids = (all_bindings_vif_port_ids.
intersection(new_vif_ports_ids))
new_local_bindings = dict([(p, all_bindings.get(p))
for p in new_vif_ports_ids])
new_bindings = set(
(p, old_local_bindings.get(p),
new_local_bindings.get(p)) for p in new_vif_ports_ids)
changed_bindings = set([b for b in new_bindings
if b[2] != b[1]])
LOG.debug('all_bindings: %s', all_bindings)
LOG.debug('net_bindings: %s', net_bindings)
LOG.debug('new_vif_ports_ids: %s', new_vif_ports_ids)
LOG.debug('dead_vif_ports_ids: %s', dead_vif_ports_ids)
LOG.debug('old_vif_ports_ids: %s', old_vif_ports_ids)
LOG.debug('new_local_bindings_ids: %s',
new_local_bindings_ids)
LOG.debug('new_local_bindings: %s', new_local_bindings)
LOG.debug('new_bindings: %s', new_bindings)
LOG.debug('changed_bindings: %s', changed_bindings)
# Take action.
for p in dead_vif_ports:
LOG.info("No quantum binding for port " + str(p)
+ "putting on dead vlan")
self.port_dead(p)
for b in changed_bindings:
port_id, old_port, new_port = b
p = new_vif_ports[port_id]
if old_port:
old_net_uuid = old_port.network_id
LOG.info("Removing binding to net-id = " +
old_net_uuid + " for " + str(p)
+ " added to dead vlan")
self.port_unbound(p.vif_id, old_net_uuid)
if p.vif_id in all_bindings:
all_bindings[p.vif_id].status = (
q_const.PORT_STATUS_DOWN)
if not new_port:
self.port_dead(p)
if new_port:
new_net_uuid = new_port.network_id
if new_net_uuid not in net_bindings:
LOG.warn("No network binding found for net-id"
" '%s'", new_net_uuid)
continue
bind = net_bindings[new_net_uuid]
self.port_bound(p, new_net_uuid,
bind.network_type,
bind.physical_network,
bind.segmentation_id)
all_bindings[p.vif_id].status = (
q_const.PORT_STATUS_ACTIVE)
LOG.info("Port %s on net-id = %s bound to %s ",
str(p), new_net_uuid,
str(self.local_vlan_map[new_net_uuid]))
for vif_id in disappeared_vif_ports_ids:
LOG.info("Port Disappeared: " + vif_id)
if vif_id in all_bindings:
all_bindings[vif_id].status = (
q_const.PORT_STATUS_DOWN)
old_port = old_local_bindings.get(vif_id)
if old_port:
self.port_unbound(vif_id, old_port.network_id)
# commit any DB changes and expire
# data loaded from the database
db.commit()
# sleep and re-initialize state for next pass
time.sleep(self.polling_interval)
if self.enable_tunneling:
old_tunnel_ips = tunnel_ips
old_vif_ports = new_vif_ports
old_local_bindings = new_local_bindings
            except Exception:
LOG.exception("Main-loop Exception:")
self.rollback_until_success(db)
def update_ports(self, registered_ports):
ports = self.int_br.get_vif_port_set()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
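    # Example of the delta computation above (plain set arithmetic):
    #
    #     registered = set(['a', 'b'])
    #     current = set(['b', 'c'])
    #     current - registered   # -> set(['c'])  (added)
    #     registered - current   # -> set(['a'])  (removed)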
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up):
if vif_port:
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id)
else:
self.port_dead(vif_port)
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
def treat_devices_added(self, devices):
resync = False
for device in devices:
LOG.info("Port %s added", device)
try:
details = self.plugin_rpc.get_device_details(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for %s: %s", device, e)
resync = True
continue
port = self.int_br.get_vif_port_by_id(details['device'])
if 'port_id' in details:
LOG.info("Port %s updated. Details: %s", device, details)
self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'])
else:
LOG.debug("Device %s not defined on plugin", device)
if (port and int(port.ofport) != -1):
self.port_dead(port)
return resync
def treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info("Attachment %s removed", device)
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug("port_removed failed for %s: %s", device, e)
                resync = True
                continue
if details['exists']:
LOG.info("Port %s updated.", device)
# Nothing to do regarding local networking
else:
LOG.debug("Device %s not defined on plugin", device)
self.port_unbound(device)
return resync
def process_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self.treat_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self.treat_devices_removed(port_info['removed'])
        # If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def tunnel_sync(self):
resync = False
try:
details = self.plugin_rpc.tunnel_sync(self.context, self.local_ip)
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
tun_name = 'gre-%s' % tunnel['id']
self.tun_br.add_gre_tunnel_port(tun_name,
tunnel['ip_address'])
except Exception as e:
LOG.debug("Unable to sync tunnel IP %s: %s", self.local_ip, e)
resync = True
return resync
def rpc_loop(self):
sync = True
ports = set()
tunnel_sync = True
while True:
try:
start = time.time()
if sync:
LOG.info("Agent out of sync with plugin!")
ports.clear()
sync = False
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
if self.tenant_network_type == constants.TYPE_GRE:
LOG.info("Agent tunnel out of sync with plugin!")
tunnel_sync = self.tunnel_sync()
port_info = self.update_ports(ports)
# notify plugin about port deltas
if port_info:
LOG.debug("Agent loop has new devices!")
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info)
ports = port_info['current']
            except Exception:
LOG.exception("Error in agent event loop")
sync = True
tunnel_sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval (%s vs. %s)!",
self.polling_interval, elapsed)
def daemon_loop(self, db_connection_url):
if self.rpc:
self.rpc_loop()
else:
self.db_loop(db_connection_url)
def main():
eventlet.monkey_patch()
cfg.CONF(args=sys.argv, project='quantum')
# (TODO) gary - swap with common logging
logging_config.setup_logging(cfg.CONF)
integ_br = cfg.CONF.OVS.integration_bridge
db_connection_url = cfg.CONF.DATABASE.sql_connection
polling_interval = cfg.CONF.AGENT.polling_interval
reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
root_helper = cfg.CONF.AGENT.root_helper
rpc = cfg.CONF.AGENT.rpc
tun_br = cfg.CONF.OVS.tunnel_bridge
local_ip = cfg.CONF.OVS.local_ip
enable_tunneling = cfg.CONF.OVS.enable_tunneling
tenant_network_type = cfg.CONF.OVS.tenant_network_type
mcast_ip = cfg.CONF.OVS.mcast_ip
mcast_routing_interface = cfg.CONF.OVS.mcast_routing_interface
vxlan_udp_port = cfg.CONF.OVS.vxlan_udp_port
if enable_tunneling:
if tenant_network_type == constants.TYPE_GRE:
if not local_ip:
LOG.error("GRE tunnelling cannot be enabled "
"without a valid local_ip.")
sys.exit(1)
elif tenant_network_type == constants.TYPE_VXLAN:
if not mcast_ip or not mcast_routing_interface:
LOG.error("VXLAN tunnelling cannot be enabled "
"without muliticast ip and "
"interface to add route for multicast ip")
sys.exit(1)
try:
bridge_mappings = q_utils.parse_mappings(cfg.CONF.OVS.bridge_mappings)
except ValueError as e:
LOG.error(_("Parsing bridge mappings failed: %s."
" Agent terminated!"), e)
sys.exit(1)
LOG.info(_("Bridge mappings: %s") % bridge_mappings)
LOG.info("db_connection: %s "
"integ_br: %s, tun_br: %s, local_ip: %s, bridge_mappings: %s "
"reconnect_interval: %s, enable_tunneling: %s, mcast_ip: %s"
"mcast route interface: %s",
db_connection_url, integ_br, tun_br, local_ip, bridge_mappings,
reconnect_interval, enable_tunneling, mcast_ip,
mcast_routing_interface)
plugin = OVSQuantumAgent(integ_br, tun_br, local_ip, bridge_mappings,
root_helper, polling_interval,
reconnect_interval, rpc, enable_tunneling,
tenant_network_type, mcast_ip, vxlan_udp_port,
mcast_routing_interface)
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
plugin.daemon_loop(db_connection_url)
sys.exit(0)
if __name__ == "__main__":
main()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib
from urllib import unquote
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.utils import ismount, split_path
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed
constraints_conf = ConfigParser()
constraints_conf.read('/etc/swift/swift.conf')
def constraints_conf_int(name, default):
try:
return int(constraints_conf.get('swift-constraints', name))
except (NoSectionError, NoOptionError):
return default
#: Max file size allowed for objects
MAX_FILE_SIZE = constraints_conf_int('max_file_size',
5368709122) # 5 * 1024 * 1024 * 1024 + 2
#: Max length of the name of a key for metadata
MAX_META_NAME_LENGTH = constraints_conf_int('max_meta_name_length', 128)
#: Max length of the value of a key for metadata
MAX_META_VALUE_LENGTH = constraints_conf_int('max_meta_value_length', 256)
#: Max number of metadata items
MAX_META_COUNT = constraints_conf_int('max_meta_count', 90)
#: Max overall size of metadata
MAX_META_OVERALL_SIZE = constraints_conf_int('max_meta_overall_size', 4096)
#: Max size of any header
MAX_HEADER_SIZE = constraints_conf_int('max_header_size', 8192)
#: Max object name length
MAX_OBJECT_NAME_LENGTH = constraints_conf_int('max_object_name_length', 1024)
#: Max object list length of a get request for a container
CONTAINER_LISTING_LIMIT = constraints_conf_int('container_listing_limit',
10000)
#: Max container list length of a get request for an account
ACCOUNT_LISTING_LIMIT = constraints_conf_int('account_listing_limit', 10000)
#: Max account name length
MAX_ACCOUNT_NAME_LENGTH = constraints_conf_int('max_account_name_length', 256)
#: Max container name length
MAX_CONTAINER_NAME_LENGTH = constraints_conf_int('max_container_name_length',
256)
# Maximum slo segments in buffer
MAX_BUFFERED_SLO_SEGMENTS = 10000
#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
'xml': 'application/xml'}
def check_metadata(req, target_type):
"""
Check metadata sent in the request headers.
:param req: request object
:param target_type: str: one of: object, container, or account: indicates
which type the target storage for the metadata is
:returns: HTTPBadRequest with bad metadata otherwise None
"""
prefix = 'x-%s-meta-' % target_type.lower()
meta_count = 0
meta_size = 0
for key, value in req.headers.iteritems():
if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
return HTTPBadRequest(body='Header value too long: %s' %
key[:MAX_META_NAME_LENGTH],
request=req, content_type='text/plain')
if not key.lower().startswith(prefix):
continue
key = key[len(prefix):]
if not key:
return HTTPBadRequest(body='Metadata name cannot be empty',
request=req, content_type='text/plain')
meta_count += 1
meta_size += len(key) + len(value)
if len(key) > MAX_META_NAME_LENGTH:
return HTTPBadRequest(
body='Metadata name too long: %s%s' % (prefix, key),
request=req, content_type='text/plain')
elif len(value) > MAX_META_VALUE_LENGTH:
return HTTPBadRequest(
body='Metadata value longer than %d: %s%s' % (
MAX_META_VALUE_LENGTH, prefix, key),
request=req, content_type='text/plain')
elif meta_count > MAX_META_COUNT:
return HTTPBadRequest(
body='Too many metadata items; max %d' % MAX_META_COUNT,
request=req, content_type='text/plain')
elif meta_size > MAX_META_OVERALL_SIZE:
return HTTPBadRequest(
body='Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE,
request=req, content_type='text/plain')
return None
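# Example: a request carrying one small object metadata header passes, while
# an over-long metadata name is rejected (swob.Request used for
# illustration):
#
#     from swift.common.swob import Request
#     req = Request.blank('/v1/a/c/o',
#                         headers={'X-Object-Meta-Color': 'blue'})
#     check_metadata(req, 'object')  # -> None (valid)
#     req.headers['X-Object-Meta-' + 'x' * 200] = 'v'
#     check_metadata(req, 'object')  # -> HTTPBadRequest (name too long)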
def check_object_creation(req, object_name):
"""
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns HTTPRequestEntityTooLarge: the object is too large
:returns HTTPLengthRequired: missing content-length header and not
a chunked request
:returns HTTPBadRequest: missing or bad content-type header, or
bad metadata
"""
if req.content_length and req.content_length > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(body='Your request is too large.',
request=req,
content_type='text/plain')
if req.content_length is None and \
req.headers.get('transfer-encoding') != 'chunked':
return HTTPLengthRequired(request=req)
if 'X-Copy-From' in req.headers and req.content_length:
return HTTPBadRequest(body='Copy requests require a zero byte body',
request=req, content_type='text/plain')
if len(object_name) > MAX_OBJECT_NAME_LENGTH:
return HTTPBadRequest(body='Object name length of %d longer than %d' %
(len(object_name), MAX_OBJECT_NAME_LENGTH),
request=req, content_type='text/plain')
if 'Content-Type' not in req.headers:
return HTTPBadRequest(request=req, content_type='text/plain',
body='No content type')
if not check_utf8(req.headers['Content-Type']):
return HTTPBadRequest(request=req, body='Invalid Content-Type',
content_type='text/plain')
return check_metadata(req, 'object')
def check_mount(root, drive):
"""
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us from accidentally filling up the root
partition.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: True if it is a valid mounted device, False otherwise
"""
if not (urllib.quote_plus(drive) == drive):
return False
path = os.path.join(root, drive)
return ismount(path)
def check_float(string):
"""
Helper function for checking if a string can be converted to a float.
:param string: string to be verified as a float
:returns: True if the string can be converted to a float, False otherwise
"""
try:
float(string)
return True
except ValueError:
return False
def check_utf8(string):
"""
Validate if a string is valid UTF-8 str or unicode and that it
does not contain any null character.
:param string: string to be validated
:returns: True if the string is valid utf-8 str or unicode and
contains no null characters, False otherwise
"""
if not string:
return False
try:
if isinstance(string, unicode):
string.encode('utf-8')
else:
string.decode('UTF-8')
return '\x00' not in string
    # encode() on a unicode object may raise UnicodeEncodeError and decode()
    # on a str may raise UnicodeDecodeError; UnicodeError is their common
    # base class, so catching it covers both cases.
except UnicodeError:
return False
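# Hedged examples (not part of the original module) of the Python 2
# semantics of the two helpers above:
#
#   check_float('1.5')         # True
#   check_float('abc')         # False: float() raises ValueError
#   check_utf8('plain str')    # True
#   check_utf8(u'caf\xe9')     # True: unicode that encodes cleanly
#   check_utf8('\xff\xfe')     # False: not valid UTF-8 bytes
#   check_utf8('has\x00null')  # False: null characters are rejected
#   check_utf8('')             # False: empty values are rejected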
def check_copy_from_header(req):
"""
Validate that the value from x-copy-from header is
well formatted. We assume the caller ensures that
x-copy-from header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise: HTTPPreconditionFailed if x-copy-from value
is not well formatted.
"""
src_header = unquote(req.headers.get('X-Copy-From'))
if not src_header.startswith('/'):
src_header = '/' + src_header
try:
return split_path(src_header, 2, 2, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
            body='X-Copy-From header must be of the form '
                 '<container name>/<object name>')
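# Hedged usage sketch (not part of the original module): exercising
# check_object_creation, which chains into check_metadata, with a minimal
# stand-in for the request object. FakeRequest and its attributes are
# illustrative assumptions, not the real request class this module expects.
if __name__ == '__main__':
    class FakeRequest(object):
        def __init__(self, headers, content_length=None):
            self.headers = headers
            self.content_length = content_length

    ok_req = FakeRequest({'Content-Type': 'text/plain'}, content_length=10)
    assert check_object_creation(ok_req, 'obj') is None

    bad_req = FakeRequest({'Content-Type': 'text/plain',
                           'x-object-meta-' + 'k' * 300: 'v'},
                          content_length=10)
    # The oversized metadata name trips check_metadata's name-length check.
    assert check_object_creation(bad_req, 'obj') is not None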
| |
import requests
from flask import json
from Norman import settings
from Norman.api.base import base
from Norman.errors import HttpError
from Norman.messenger.userProfile import Profile
from Norman.norman.user import NormanUser
from Norman.settings import FBConfig, MessageConfig, ServiceListConfig
from Norman.utils import response
graphAPIURL = FBConfig.GRAPH_API_URL.replace('<action>', '/me/messages?')
class Message(object):
def __init__(self, recipient_id, **kwargs):
self.recipient_id = recipient_id
self.notification_type = None
self.payload_structure = {
'recipient': {
'id': self.recipient_id
},
'message': {
'text': '',
'attachment': {
'type': '',
'payload': {
'template_type': '',
'text': '',
'buttons': ''
},
},
'quick_replies': []
},
'sender_action': '',
'notification_type': ''
}
self.user_profile = Profile()
def send_action(self, action):
"""
:param action: - typing_on, typing_off, mark_as_read
"""
# clean up payload
# self.payload_structure.pop('message')
self.payload_structure.pop('notification_type')
self.payload_structure['sender_action'] = action
# connect
request = base.exec_request('POST', graphAPIURL, data=self.payload_structure)
if request:
return request
else:
raise HttpError('Unable to complete request.')
@staticmethod
def show_typing(recipient_id, action='typing_on'):
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": settings.FBConfig.FACEBOOK_SECRET_KEY},
data=json.dumps({
"recipient": {"id": recipient_id},
"sender_action": action
}),
headers={'Content-type': 'application/json'})
        # Report success only when the Graph API call actually succeeded.
        if r.status_code == requests.codes.ok:
            return response.response_ok('Success')
        raise HttpError('Unable to complete request.')
def send_message(self, message_type, message_text=None, attachment=None, notification_type=None, quick_replies=None):
"""
- text must be UTF-8 and has a 640 character limit
- You cannot send a text and an attachment together
:param quick_replies: a list of quick responses sent along with the message to the user
:param message_type: text or attachment
:param message_text: text to send
        :param attachment: a valid attachment object, i.e. a dictionary
:param notification_type: REGULAR, SILENT_PUSH, or NO_PUSH
:return: json response object
"""
if message_type == "text":
self.payload_structure['message']['text'] = message_text
try:
self.payload_structure['message'].pop('attachment')
except KeyError:
pass
else:
try:
self.payload_structure['message'].pop('text')
except KeyError:
pass
self.payload_structure['message']['attachment'] = attachment
# clean up payload
try:
self.payload_structure.pop('sender_action')
except KeyError:
pass
if quick_replies:
self.payload_structure['message']['quick_replies'] = quick_replies
else:
try:
self.payload_structure['message'].pop('quick_replies')
except KeyError:
pass
if notification_type:
self.payload_structure['notification_type'] = notification_type
else:
try:
self.payload_structure.pop('notification_type')
except KeyError:
pass
# connect
request = base.exec_request('POST', graphAPIURL, data=self.payload_structure)
if request:
return request
else:
raise HttpError('Unable to complete request.')
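# Hedged usage sketch (not part of the original class; kept commented out
# because send_message performs a live Graph API request):
#
#   msg = Message(recipient_id='1234567890')
#   msg.send_message('text', message_text='Hello!',
#                    quick_replies=[{'content_type': 'text',
#                                    'title': 'Hi',
#                                    'payload': 'SAY_HI'}])
#
# For an attachment, pass message_type='attachment' with an attachment dict
# instead of message_text; the two are mutually exclusive.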
class Template(Message):
def __init__(self, recipient_id, **kwargs):
super(Template, self).__init__(recipient_id, **kwargs)
self.payload_structure['message']["attachment"]["type"] = "template"
self.payload_structure['message']["attachment"]["payload"]["buttons"] = {}
self.payload_structure['message']["attachment"]["payload"]["elements"] = [{
'title': "",
'image_url': "",
'subtitle': "",
'default_action': {
'type': '',
'url': '',
'messenger_extensions': '',
'webview_height_ratio': '',
'fallback_url': ''
},
'buttons': {
'title': '',
'type': '',
'url': '',
'messenger_extensions': '',
'webview_height_ratio': '',
'fallback_url': ''
}
}]
def send_template_message(self, template_type, **kwargs):
self.payload_structure["message"]["attachment"]["payload"]["template_type"] = template_type
if template_type == "button":
self.payload_structure['message']["attachment"]["payload"]["text"] = kwargs.get('text')
self.payload_structure['message']['attachment']['payload'].pop('elements')
elif template_type == 'generic':
self.payload_structure['message']["attachment"]["payload"]['elements'][0] = kwargs.get('generic_info')
elif template_type == 'list':
self.payload_structure['message']["attachment"]["payload"]['elements'] = kwargs.get('list_info')
self.payload_structure['message']['attachment']['payload'].pop('text')
if kwargs.get("buttons"):
self.payload_structure['message']["attachment"]["payload"]['buttons'] = [kwargs.get('buttons')]
else:
try:
self.payload_structure.pop('buttons')
except KeyError:
pass
# clean up payload
self.payload_structure.pop('sender_action')
notification_type = kwargs.get('notification_type')
if notification_type:
self.payload_structure['notification_type'] = notification_type
else:
self.payload_structure.pop('notification_type')
        quick_replies = kwargs.get('quick_replies')
if quick_replies:
            self.payload_structure['message']['quick_replies'] = quick_replies
else:
self.payload_structure['message'].pop('quick_replies')
self.payload_structure['message'].pop('text')
request = base.exec_request('POST', graphAPIURL, data=self.payload_structure)
if request:
return request
else:
raise HttpError('Unable to complete request.')
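# Hedged usage sketch (not part of the original class; kept commented out
# because send_template_message performs a live Graph API request). A
# button template takes 'text' plus 'buttons', mirroring the branches above:
#
#   tpl = Template(recipient_id='1234567890')
#   tpl.send_template_message('button',
#                             text='Pick one:',
#                             buttons={'type': 'postback',
#                                      'title': 'Help',
#                                      'payload': 'NORMAN_GET_HELP'})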
class PostBackMessages(Template):
def __init__(self, recipient_id, **kwargs):
super(PostBackMessages, self).__init__(recipient_id, **kwargs)
self.recipient_id = recipient_id
self.temp_user = None
self.current_user = NormanUser()
self.user_details = self.user_profile.get_user_details(recipient_id)
def handle_get_started(self, registered):
        # 'unregistered' is an assumed key in MessageConfig.GET_STARTED_MESSAGE,
        # mirroring the 'registered' variant.
        message_text = MessageConfig.GET_STARTED_MESSAGE['registered'] if registered \
            else MessageConfig.GET_STARTED_MESSAGE['unregistered']
message_text = message_text.replace('<username>', self.user_details['first_name'])
quick_replies = [
{"content_type": "text", "title": "What does that mean?", "payload": "NORMAN_GET_STARTED_MEANING"},
{"content_type": "text", "title": "How do you do that?", "payload": "NORMAN_GET_STARTED_HOW"},
]
return self.send_message("text", message_text=message_text, quick_replies=quick_replies)
def handle_get_started_meaning(self, registered):
message_text = MessageConfig.GET_STARTED_MEANING
quick_replies = [
{"content_type": "text", "title": "How do you do that?", "payload": "NORMAN_GET_STARTED_HOW"},
{"content_type": "text", "title": "What services do you offer?", "payload": "NORMAN_GET_ALL_SERVICE_LIST"}
]
return self.send_message("text", message_text=message_text, quick_replies=quick_replies)
def handle_get_started_how(self, registered):
message_text = MessageConfig.GET_STARTED_HOW
quick_replies = [
{"content_type": "text", "title": "What services do you offer?", "payload": "NORMAN_GET_ALL_SERVICE_LIST"},
{"content_type": "text", "title": "Nice", "payload": "GOOD_TO_GO"},
{"content_type": "text", "title": "I'm still confused",
"payload": "NORMAN_GET_HELP"}
]
return self.send_message("text", message_text=message_text, quick_replies=quick_replies)
def get_started_service_list(self, registered):
# self.send_message("text", message_text="Here are the services we offer")
self.send_template_message(template_type='list', list_info=[ServiceListConfig.messaging,
ServiceListConfig.reminder,
ServiceListConfig.emergency,
ServiceListConfig.scheduling
])
message_text = MessageConfig.GET_ALL_SERVICE_LIST.replace('<username>', self.user_details['first_name'])
quick_replies = [
{"content_type": "text", "title": "Nice", "payload": "GOOD_TO_GO"},
{"content_type": "text", "title": "I'm still confused",
"payload": "NORMAN_GET_HELP"}
]
return self.send_message("text", message_text=message_text, quick_replies=quick_replies)
def handle_help(self, registered):
message_text = MessageConfig.GET_HELP_MESSAGE.replace('<username>', self.user_details['first_name'])
quick_replies = [
{"content_type": "text", "title": "Tell Me About You", "payload": "NORMAN_GET_STARTED_PAYLOAD"},
{"content_type": "text", "title": "Leave a Message", "payload": "NORMAN_LEAVE_MESSAGE"},
{"content_type": "text", "title": "Set Reminder", "payload": "NORMAN_SET_REMINDER"},
{"content_type": "text", "title": "Request Urgent Help", "payload": "NORMAN_REQUEST_URGENT_HELP"},
{"content_type": "text", "title": "Book an Appointment","payload": "NORMAN_BOOK_APPOINTMENT"}
]
return self.send_message("text", message_text=message_text,quick_replies=quick_replies)
def good_to_go(self, registered):
message_text = "Awesome {0}".format(MessageConfig.EMOJI_DICT['HAPPY_SMILE'])
self.send_message("text", message_text=message_text)
return self.beyondGetStarted(registered)
    def beyondGetStarted(self, registered):
if self.current_user.is_from_ref_id:
message_text = MessageConfig.COMING_FROM_HOSPITAL
            self.show_typing(self.recipient_id, 'typing_on')
            self.show_typing(self.recipient_id, 'typing_off')
self.send_message('text', message_text)
            self.show_typing(self.recipient_id, 'typing_on')
            self.show_typing(self.recipient_id, 'typing_off')
return self.send_message('text', MessageConfig.TIME_TO_SET_UP)
else:
return self.handle_first_time_temp_user(registered)
def handle_first_time_temp_user(self, registered):
for statement in MessageConfig.FIRST_TIME_TEMP_USER:
self.send_message('text', statement)
text = "While you can enjoy some of my services as a free user," + \
"to enjoy the best of my features, you need to be registered to an hospital."
self.send_message("text", message_text=text)
quick_replies = [
{"content_type": "text", "title": "Continue as a free user", "payload": "GOOD_TO_GO_FREE"},
{"content_type": "text", "title": "Inform your hospital about Norman", "payload": "GET_NEARBY_HOSPITAL"},
{"content_type": "text", "title": "View registered hospitals", "payload": "GET_NEARBY_HOSPITAL"}
]
second_text = "As a free user, you can go on and"
return self.send_message("text", message_text=second_text, quick_replies=quick_replies)
def good_to_go_free(self, registered):
message_text = "Sweet. You're all setup up now to use Norman as a free user."
self.send_message('text', message_text=message_text)
options = "You can either send a message, or try one of the options below"
quick_replies = [
{"content_type": "text", "title": "Drug Use Reminder", "payload": "DRUG_USE_REMINDER"},
{"content_type": "text", "title": "Emergency Service", "payload": "NORMAN_REQUEST_URGENT_HELP"},
{"content_type": "text", "title": "Drug Purchase", "payload": "DRUG_PURCHASE"}
]
return self.send_message('text', message_text=options, quick_replies=quick_replies)
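# Hedged dispatch sketch (not part of the original module): how a webhook
# handler might route quick-reply payloads to the handlers above. The
# payload-to-method mapping is an illustrative assumption.
#
#   handler = PostBackMessages(recipient_id)
#   routes = {
#       'NORMAN_GET_STARTED_MEANING': handler.handle_get_started_meaning,
#       'NORMAN_GET_STARTED_HOW': handler.handle_get_started_how,
#       'NORMAN_GET_ALL_SERVICE_LIST': handler.get_started_service_list,
#       'NORMAN_GET_HELP': handler.handle_help,
#       'GOOD_TO_GO': handler.good_to_go,
#       'GOOD_TO_GO_FREE': handler.good_to_go_free,
#   }
#   routes[payload](registered=True)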
| |
import sys
import textwrap
import pytest
import salt.modules.state as state
import salt.state
import salt.utils.files
import salt.utils.json
from tests.support.mock import MagicMock, patch
pytestmark = [
pytest.mark.skipif(
sys.version_info < (3, 6), reason="Dictionaries are not ordered under Py3.5"
),
]
@pytest.fixture
def configure_loader_modules(salt_minion_factory):
return {
state: {
"__opts__": salt_minion_factory.config.copy(),
"__salt__": {"saltutil.is_running": MagicMock(return_value=[])},
},
}
@pytest.fixture
def cachedir(tmp_path):
path = tmp_path / "cache"
path.mkdir()
return path
@pytest.fixture
def fileserver_root(tmp_path):
path = tmp_path / "fileserver-root"
path.mkdir()
return path
@pytest.fixture
def saltenvs():
return ["base", "foo", "bar", "baz"]
@pytest.fixture
def saltenv_roots(fileserver_root, saltenvs):
return {env: fileserver_root / env for env in saltenvs}
@pytest.fixture
def base_top_file(saltenv_roots):
return str(saltenv_roots["base"] / "top.sls")
@pytest.fixture
def dunder_opts(saltenv_roots, saltenvs):
return {
"file_client": "local",
"default_top": "base",
"env_order": saltenv_roots,
"file_roots": {
"base": [str(saltenv_roots["base"])],
"foo": [str(saltenv_roots["foo"])],
"bar": [str(saltenv_roots["bar"])],
"baz": [str(saltenv_roots["baz"])],
},
}
@pytest.fixture(autouse=True)
def state_tree(saltenv_roots, saltenvs):
# Write top files for all but the "baz" environment
for env, path in saltenv_roots.items():
path.mkdir()
if env == "baz":
continue
top_file = path / "top.sls"
with salt.utils.files.fopen(str(top_file), "w") as fp_:
# Add a section for every environment to each top file, with
# the SLS target prefixed with the current saltenv.
for env_name in saltenvs:
fp_.write(
textwrap.dedent(
"""\
{env_name}:
'*':
- {saltenv}_{env_name}
""".format(
env_name=env_name, saltenv=env
)
)
)
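# For reference (derived from the fixture above, not part of the original
# file): with saltenvs == ["base", "foo", "bar", "baz"], the generated
# base/top.sls contains a section per saltenv, each targeting a
# base-prefixed SLS file:
#
#   base:
#     '*':
#       - base_base
#   foo:
#     '*':
#       - base_foo
#   bar:
#     '*':
#       - base_bar
#   baz:
#     '*':
#       - base_baz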
@pytest.fixture
def limited_base_top_file(state_tree, base_top_file):
with salt.utils.files.fopen(base_top_file, "w") as fp_:
fp_.write(
textwrap.dedent(
"""\
base:
'*':
- base_base
"""
)
)
def show_top(dunder_opts, **kwargs):
dunder_opts.update(kwargs)
with patch.dict(state.__opts__, dunder_opts), patch.object(
salt.state.State, "_gather_pillar", MagicMock(return_value={})
):
ret = state.show_top()
return ret
def test_merge_strategy_merge(dunder_opts):
"""
Base overrides everything
"""
ret = show_top(dunder_opts, top_file_merging_strategy="merge")
assert ret == {
"base": ["base_base"],
"foo": ["base_foo"],
"bar": ["base_bar"],
"baz": ["base_baz"],
}
@pytest.mark.usefixtures("limited_base_top_file")
def test_merge_strategy_merge_limited_base(dunder_opts, base_top_file):
"""
Test with a "base" top file containing only a "base" section. The "baz"
saltenv should not be in the return data because that env doesn't have
its own top file and there will be no "baz" section in the "base" env's
top file.
Next, append a "baz" section to the rewritten top file and we should
get results for that saltenv in the return data.
"""
ret = show_top(dunder_opts, top_file_merging_strategy="merge")
assert ret == {
"base": ["base_base"],
"foo": ["foo_foo"],
"bar": ["bar_bar"],
}
# Add a "baz" section
with salt.utils.files.fopen(base_top_file, "a") as fp_:
fp_.write(
textwrap.dedent(
"""\
baz:
'*':
- base_baz
"""
)
)
ret = show_top(dunder_opts, top_file_merging_strategy="merge")
assert ret == {
"base": ["base_base"],
"foo": ["foo_foo"],
"bar": ["bar_bar"],
"baz": ["base_baz"],
}
def test_merge_strategy_merge_state_top_saltenv_base(dunder_opts):
"""
This tests with state_top_saltenv=base, which should pull states *only*
from the base saltenv.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="merge", state_top_saltenv="base"
)
assert ret == {
"base": ["base_base"],
"foo": ["base_foo"],
"bar": ["base_bar"],
"baz": ["base_baz"],
}
def test_merge_strategy_merge_state_top_saltenv_foo(dunder_opts):
"""
This tests with state_top_saltenv=foo, which should pull states *only*
from the foo saltenv. Since that top file is only authoritative for
its own saltenv, *only* the foo saltenv's matches from the foo top file
should be in the return data.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="merge", state_top_saltenv="foo"
)
assert ret == {"foo": ["foo_foo"]}
def test_merge_strategy_merge_all(dunder_opts):
"""
Include everything in every top file
"""
ret = show_top(dunder_opts, top_file_merging_strategy="merge_all")
assert ret == {
"base": ["base_base", "foo_base", "bar_base"],
"foo": ["base_foo", "foo_foo", "bar_foo"],
"bar": ["base_bar", "foo_bar", "bar_bar"],
"baz": ["base_baz", "foo_baz", "bar_baz"],
}
def test_merge_strategy_merge_all_alternate_env_order(dunder_opts):
"""
Use an alternate env_order. This should change the order in which the
SLS targets appear in the result.
"""
ret = show_top(
dunder_opts,
top_file_merging_strategy="merge_all",
env_order=["bar", "foo", "base"],
)
assert ret == {
"base": ["bar_base", "foo_base", "base_base"],
"foo": ["bar_foo", "foo_foo", "base_foo"],
"bar": ["bar_bar", "foo_bar", "base_bar"],
"baz": ["bar_baz", "foo_baz", "base_baz"],
}
def test_merge_strategy_merge_all_state_top_saltenv_base(dunder_opts):
"""
This tests with state_top_saltenv=base, which should pull states *only*
from the base saltenv. Since we are using the "merge_all" strategy, all
the states from that top file should be in the return data.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="merge_all", state_top_saltenv="base"
)
assert ret == {
"base": ["base_base"],
"foo": ["base_foo"],
"bar": ["base_bar"],
"baz": ["base_baz"],
}
def test_merge_strategy_merge_all_state_top_saltenv_foo(dunder_opts):
"""
This tests with state_top_saltenv=foo, which should pull states *only*
from the foo saltenv. Since we are using the "merge_all" strategy, all
the states from that top file should be in the return data.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="merge_all", state_top_saltenv="foo"
)
assert ret == {
"base": ["foo_base"],
"foo": ["foo_foo"],
"bar": ["foo_bar"],
"baz": ["foo_baz"],
}
def test_merge_strategy_same(dunder_opts):
"""
Each env should get its SLS targets from its own top file, with the
"baz" env pulling from "base" since default_top=base and there is no
top file in the "baz" saltenv.
"""
ret = show_top(dunder_opts, top_file_merging_strategy="same")
assert ret == {
"base": ["base_base"],
"foo": ["foo_foo"],
"bar": ["bar_bar"],
"baz": ["base_baz"],
}
@pytest.mark.usefixtures("limited_base_top_file")
def test_merge_strategy_same_limited_base(dunder_opts):
"""
Each env should get its SLS targets from its own top file, with the
"baz" env pulling from "base" since default_top=base and there is no
top file in the "baz" saltenv.
"""
ret = show_top(dunder_opts, top_file_merging_strategy="same")
assert ret == {
"base": ["base_base"],
"foo": ["foo_foo"],
"bar": ["bar_bar"],
}
def test_merge_strategy_same_default_top_foo(dunder_opts):
"""
Each env should get its SLS targets from its own top file, with the
"baz" env pulling from "foo" since default_top=foo and there is no top
file in the "baz" saltenv.
"""
ret = show_top(dunder_opts, top_file_merging_strategy="same", default_top="foo")
assert ret == {
"base": ["base_base"],
"foo": ["foo_foo"],
"bar": ["bar_bar"],
"baz": ["foo_baz"],
}
def test_merge_strategy_same_state_top_saltenv_base(dunder_opts):
"""
Test the state_top_saltenv parameter to load states exclusively from
the base saltenv, with the "same" merging strategy. This should
result in just the base environment's states from the base top file
being in the merged result.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="same", state_top_saltenv="base"
)
assert ret == {"base": ["base_base"]}
def test_merge_strategy_same_state_top_saltenv_foo(dunder_opts):
"""
Test the state_top_saltenv parameter to load states exclusively from
the foo saltenv, with the "same" merging strategy. This should
result in just the foo environment's states from the foo top file
being in the merged result.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="same", state_top_saltenv="foo"
)
assert ret == {"foo": ["foo_foo"]}
def test_merge_strategy_same_state_top_saltenv_baz(dunder_opts):
"""
Test the state_top_saltenv parameter to load states exclusively from
the baz saltenv, with the "same" merging strategy. This should
result in an empty dictionary since there is no top file in that
environment.
"""
ret = show_top(
dunder_opts, top_file_merging_strategy="same", state_top_saltenv="baz"
)
assert ret == {}
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Node composer instrument
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 1.1.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import datetime
import json
import logging
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Requires, \
Instantiate, Property, Validate
# Cohorte
import cohorte
import cohorte.composer
import cohorte.instruments
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
STORY_PAGE = """<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<meta charset="UTF-8">
<title>Node Composer Story</title>
<link rel="stylesheet" type="text/css" media="all"
href="{statics}/narrative/narrative.css">
<script src="{statics}/d3.min.js"></script>
<script src="{statics}/narrative/catxml.js"></script>
<script src="{statics}/narrative/narrative.js"></script>
</head>
<body>
<p id="chart" />
<script lang="javascript">
characters_xml = '{characters_xml}';
json_scenes = '{json_scenes}';
draw_chart('Composer Story', 'composer_story',
characters_xml, json_scenes,
true, false, false);
</script>
</body>
</html>
"""
SECOND_STORY_PAGE = """<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<meta charset="UTF-8">
<title>Node Composer Story</title>
<script src="{statics}/d3.min.js"></script>
<script src="{statics}/isolates_story/story.js"></script>
</head>
<body>
<p id="chart" />
<script lang="javascript">
json_data = '{json_data}';
data = JSON.parse(json_data);
draw_chart("#chart", data);
</script>
</body>
</html>
"""
ISOLATES_PAGE = """<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<meta charset="UTF-8">
<title>Node Composer Isolates</title>
<script src="{statics}/d3.min.js"></script>
<link rel="stylesheet" type="text/css" media="all"
href="{statics}/circle2/style.css">
<script src="{statics}/circle2/circle.js"></script>
</head>
<body>
<script lang="javascript">
json_distribution = '{json_distribution}';
draw_chart(json_distribution);
</script>
</body>
</html>
"""
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.instruments.SERVICE_INSTRUMENT_UI)
@Requires('_history', cohorte.composer.SERVICE_HISTORY_NODE)
@Property('_name', cohorte.instruments.PROP_INSTRUMENT_NAME, 'node')
@Instantiate('instrument-node')
class NodeComposerInstrument(cohorte.instruments.CommonHttp):
"""
Prints information about the node composer
"""
def __init__(self):
"""
Sets up members
"""
# Parent initialization
super(NodeComposerInstrument, self).__init__()
# Composer history
self._history = None
# Node name
self._node_name = None
# Instrument name
self._name = None
# Path -> method
self._paths = {'story': self._send_story,
'story2': self._send_second_story,
'isolates': self._send_isolates}
@Validate
def validate(self, context):
"""
Component validated
"""
self._node_name = context.get_property(cohorte.PROP_NODE_NAME)
def handle_request(self, base_path, sub_path, request, response):
"""
        Handles an HTTP request
        :param base_path: Path to this instrument
        :param sub_path: Part of the path for this instrument
        :param request: An HTTP request bean
        :param response: An HTTP response bean
"""
parts = [part for part in sub_path.split('/') if part]
if not parts:
# No parameter given
self.send_index(response)
else:
try:
# Find the method handling this path...
handler = self._paths[parts[0]]
except KeyError:
# ... not found
self.page_not_found(
response, "Unknown page: {0}".format(parts[0]), self._name)
else:
# ... use it
handler(response)
def send_index(self, response):
"""
Prepares the index page
"""
# Prepare the lists of links
items_list = self.make_list(
self.make_link(name.title(), self._name, name)
for name in sorted(self._paths))
# Prepare the HTML body
body = "<h2>Node Composer pages</h2>\n{items_list}\n" \
.format(items_list=items_list)
# Send the page
response.send_content(
200, self.make_page("Cohorte Node Composer", body))
def _send_second_story(self, response):
"""
Sends a graph showing the isolate of each component during distribution
"""
hist_scenes = self._history.items()
if not hist_scenes:
# No history yet
response.send_content(200, "<p>No history yet...</p>")
return
        # Get the names of all components
all_names = self.__extract_story_characters(hist_scenes)
# Prepare temporary dictionary
data = {name: {"name": name, "scenes": []} for name in all_names}
# Store data
for timestamp, distribution in hist_scenes:
for isolate, components in distribution.items():
for name in components:
dist_time = datetime.datetime.fromtimestamp(timestamp)
data[name]['scenes'].append(
{"distribution": dist_time.strftime("%X"),
"isolate": isolate})
# Generate JSON
data_json = json.dumps(list(data.values()), indent=True)
        # Escape content: it must be a Javascript string declaration
data_json = data_json.replace("'", "\\'").replace("\n", " \\\n")
# Generate the page content
page = SECOND_STORY_PAGE.format(statics=self.get_statics_path(),
json_data=data_json)
response.send_content(200, page)
def _send_story(self, response):
"""
Sends an XKCD-story-like graph about components
"""
hist_scenes = self._history.items()
if not hist_scenes:
# No history yet
response.send_content(200, "<p>No history yet...</p>")
return
        # Get the names of all components
all_names = self.__extract_story_characters(hist_scenes)
# Prepare character -> ID association
characters_id = dict((name, idx)
for idx, name in enumerate(all_names))
id_characters = dict((idx, name)
for name, idx in characters_id.items())
# Prepare "character.xml"
characters_xml = '<?xml version="1.0"?>\n<characters>\n{0}\n' \
'</characters>\n'\
.format('\n'.join('\t<character group="{0}" id="{1}" name="{2}"/>'
.format(idx % 10, idx, id_characters[idx])
for idx in sorted(characters_id.values())))
# Prepare "narrative.json"
scenes = self.__make_story_scenes(hist_scenes, characters_id)
narrative_json = json.dumps({"scenes": scenes}, indent=True)
        # Escape content: it must be a Javascript string declaration
characters_xml = characters_xml.replace("'", "\\'") \
.replace("\n", " \\\n")
narrative_json = narrative_json.replace("'", "\\'") \
.replace("\n", " \\\n")
# Generate the page content
page = STORY_PAGE.format(statics=self.get_statics_path(),
characters_xml=characters_xml,
json_scenes=narrative_json)
response.send_content(200, page)
@staticmethod
def __extract_story_characters(history_items):
"""
Makes a sorted list of the characters in the whole history
        :param history_items: Result of History.items()
:return: A sorted tuple of names
"""
        # Get the names of all components
all_names = set()
for hist_scene in history_items:
# hist_scene: tuple(timestamp, {isolate -> tuple(names)})
for components_group in hist_scene[1].values():
all_names.update(components_group)
# Make a sorted tuple out of this set
return tuple(sorted(all_names))
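    # Hedged worked example (not part of the original file): given history
    # items like
    #   [(1000.0, {'isolate-1': ('compA', 'compB')}),
    #    (1010.0, {'isolate-1': ('compA',), 'isolate-2': ('compB',)})]
    # the method above collects every component name once and returns the
    # sorted tuple ('compA', 'compB').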
def __make_story_scenes(self, history, characters):
"""
Prepares an array representing the scenes in the given history
:param history: A sorted history
:param characters: A Name -> CharacterID dictionary
:return: A tuple of scene dictionaries
"""
# Compute starting and ending time stamps
min_stamp = history[0][0]
max_stamp = history[-1][0]
delta = max_stamp - min_stamp
if delta == 0:
# Only one panel
def scale(stamp):
"""
No need to scale X-axis values
"""
return 1
else:
# Prepare a scaling method to have indices in [0,50]
def scale(stamp):
"""
Scales X-axis values (time stamps) in a [0,50] domain
"""
return int(((stamp - min_stamp) * 50) / delta)
scenes = []
idx = 0
for timestamp, distribution in history:
start = scale(timestamp)
for isolate, components in distribution.items():
idx += 1
scenes.append(self.__make_story_scene(
idx, isolate, start, components, characters))
return tuple(scenes)
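    # Hedged worked example (not part of the original file) for the scale()
    # helper above: with min_stamp=1000 and max_stamp=1100 (delta=100),
    # scale(1000) == 0, scale(1050) == 25 and scale(1100) == 50, so every
    # scene start lands on a panel index within the [0, 50] domain.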
@staticmethod
def __make_story_scene(idx, name, start, live_characters, all_characters):
"""
Prepares the dictionary that represents the given scene
:param idx: Scene ID
:param name: Name of the scene
:param start: Scene start panel
:param live_characters: List of the names of the characters in this
scene
:param all_characters: A Name -> CharacterID dictionary
"""
return {
"id": idx,
"name": name,
"duration": 1,
"start": start,
"chars": [all_characters[name] for name in live_characters],
}
def _send_isolates(self, response):
"""
Sends a circle packing graph about isolates content
"""
# Root: node name
distribution = {'name': self._node_name}
# Get the status history
hist_scenes = self._history.items()
if hist_scenes:
# History there... only use the last scene (forget the timestamp)
last_dist = hist_scenes[-1][1]
distribution['children'] = [
{'name': isolate,
'children': [{'name': name, 'size': 100}
for name in components]}
for isolate, components in last_dist.items()
]
        # Escape content: it must be a Javascript string declaration
json_distribution = json.dumps(distribution, indent=True)
json_distribution = json_distribution.replace("'", "\\'") \
.replace("\n", " \\\n")
# Generate the page content
page = ISOLATES_PAGE.format(statics=self.get_statics_path(),
json_distribution=json_distribution)
response.send_content(200, page)
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict, List
from pyflink.common.execution_mode import ExecutionMode
from pyflink.common.input_dependency_constraint import InputDependencyConstraint
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class
__all__ = ['ExecutionConfig']
class ExecutionConfig(object):
"""
    A config to define the behavior of the program execution. It allows defining (among other
options) the following settings:
- The default parallelism of the program, i.e., how many parallel tasks to use for
all functions that do not define a specific value directly.
- The number of retries in the case of failed executions.
- The delay between execution retries.
- The :class:`ExecutionMode` of the program: Batch or Pipelined.
The default execution mode is :data:`ExecutionMode.PIPELINED`
- Enabling or disabling the "closure cleaner". The closure cleaner pre-processes
the implementations of functions. In case they are (anonymous) inner classes,
it removes unused references to the enclosing class to fix certain serialization-related
problems and to reduce the size of the closure.
    - The config allows registering types and serializers to increase the efficiency of
handling *generic types* and *POJOs*. This is usually only needed
when the functions return not only the types declared in their signature, but
also subclasses of those types.
:data:`PARALLELISM_DEFAULT`:
The flag value indicating use of the default parallelism. This value can
be used to reset the parallelism back to the default state.
:data:`PARALLELISM_UNKNOWN`:
The flag value indicating an unknown or unset parallelism. This value is
not a valid parallelism and indicates that the parallelism should remain
unchanged.
"""
PARALLELISM_DEFAULT = -1
PARALLELISM_UNKNOWN = -2
def __init__(self, j_execution_config):
self._j_execution_config = j_execution_config
def enable_closure_cleaner(self) -> 'ExecutionConfig':
"""
Enables the ClosureCleaner. This analyzes user code functions and sets fields to null
that are not used. This will in most cases make closures or anonymous inner classes
        serializable that were not serializable due to some Scala or Java implementation artifact.
User code must be serializable because it needs to be sent to worker nodes.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableClosureCleaner()
return self
def disable_closure_cleaner(self) -> 'ExecutionConfig':
"""
Disables the ClosureCleaner.
.. seealso:: :func:`enable_closure_cleaner`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableClosureCleaner()
return self
def is_closure_cleaner_enabled(self) -> bool:
"""
Returns whether the ClosureCleaner is enabled.
.. seealso:: :func:`enable_closure_cleaner`
:return: ``True`` means enable and ``False`` means disable.
"""
return self._j_execution_config.isClosureCleanerEnabled()
def set_auto_watermark_interval(self, interval: int) -> 'ExecutionConfig':
"""
Sets the interval of the automatic watermark emission. Watermarks are used throughout
the streaming system to keep track of the progress of time. They are used, for example,
for time based windowing.
:param interval: The integer value interval between watermarks in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setAutoWatermarkInterval(interval)
return self
def get_auto_watermark_interval(self) -> int:
"""
Returns the interval of the automatic watermark emission.
.. seealso:: :func:`set_auto_watermark_interval`
:return: The integer value interval in milliseconds of the automatic watermark emission.
"""
return self._j_execution_config.getAutoWatermarkInterval()
def set_latency_tracking_interval(self, interval: int) -> 'ExecutionConfig':
"""
Interval for sending latency tracking marks from the sources to the sinks.
Flink will send latency tracking marks from the sources at the specified interval.
Setting a tracking interval <= 0 disables the latency tracking.
:param interval: Integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setLatencyTrackingInterval(interval)
return self
def get_latency_tracking_interval(self) -> int:
"""
Returns the latency tracking interval.
:return: The latency tracking interval in milliseconds.
"""
return self._j_execution_config.getLatencyTrackingInterval()
def get_parallelism(self) -> int:
"""
        Gets the parallelism with which operations are executed by default. Operations can
individually override this value to use a specific parallelism.
Other operations may need to run with a different parallelism - for example calling
a reduce operation over the entire data set will involve an operation that runs
with a parallelism of one (the final reduce to the single result value).
:return: The parallelism used by operations, unless they override that value. This method
returns :data:`ExecutionConfig.PARALLELISM_DEFAULT` if the environment's default
parallelism should be used.
"""
return self._j_execution_config.getParallelism()
def set_parallelism(self, parallelism: int) -> 'ExecutionConfig':
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as join, map, reduce) to run
with x parallel instances.
This method overrides the default parallelism for this environment.
The local execution environment uses by default a value equal to the number of hardware
contexts (CPU cores / threads). When executing the program via the command line client
from a JAR/Python file, the default parallelism is the one configured for that setup.
:param parallelism: The parallelism to use.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setParallelism(parallelism)
return self
def get_max_parallelism(self) -> int:
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_execution_config.getMaxParallelism()
def set_max_parallelism(self, max_parallelism: int) -> 'ExecutionConfig':
"""
Sets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program.
"""
self._j_execution_config.setMaxParallelism(max_parallelism)
return self
def get_task_cancellation_interval(self) -> int:
"""
Gets the interval (in milliseconds) between consecutive attempts to cancel a running task.
:return: The integer value interval in milliseconds.
"""
return self._j_execution_config.getTaskCancellationInterval()
def set_task_cancellation_interval(self, interval: int) -> 'ExecutionConfig':
"""
Sets the configuration parameter specifying the interval (in milliseconds)
between consecutive attempts to cancel a running task.
:param interval: The integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationInterval(interval)
return self
def get_task_cancellation_timeout(self) -> int:
"""
Returns the timeout (in milliseconds) after which an ongoing task
cancellation leads to a fatal TaskManager error.
The value ``0`` means that the timeout is disabled. In
this case a stuck cancellation will not lead to a fatal error.
:return: The timeout in milliseconds.
"""
return self._j_execution_config.getTaskCancellationTimeout()
def set_task_cancellation_timeout(self, timeout: int) -> 'ExecutionConfig':
"""
Sets the timeout (in milliseconds) after which an ongoing task cancellation
is considered failed, leading to a fatal TaskManager error.
The cluster default is configured via ``TaskManagerOptions#TASK_CANCELLATION_TIMEOUT``.
The value ``0`` disables the timeout. In this case a stuck
cancellation will not lead to a fatal error.
:param timeout: The task cancellation timeout (in milliseconds).
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationTimeout(timeout)
return self
def set_restart_strategy(
self,
restart_strategy_configuration: RestartStrategyConfiguration) -> 'ExecutionConfig':
"""
Sets the restart strategy to be used for recovery.
::
>>> config = env.get_config()
>>> config.set_restart_strategy(RestartStrategies.fixed_delay_restart(10, 1000))
The restart strategy configurations are all created from :class:`RestartStrategies`.
:param restart_strategy_configuration: Configuration defining the restart strategy to use.
"""
self._j_execution_config.setRestartStrategy(
restart_strategy_configuration._j_restart_strategy_configuration)
return self
def get_restart_strategy(self) -> RestartStrategyConfiguration:
"""
Returns the restart strategy which has been set for the current job.
.. seealso:: :func:`set_restart_strategy`
:return: The specified restart configuration.
"""
return RestartStrategies._from_j_restart_strategy(
self._j_execution_config.getRestartStrategy())
def set_execution_mode(self, execution_mode: ExecutionMode) -> 'ExecutionConfig':
"""
Sets the execution mode to execute the program. The execution mode defines whether
        data exchanges are performed in a batch or in a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
Example:
::
>>> config.set_execution_mode(ExecutionMode.BATCH)
:param execution_mode: The execution mode to use. The execution mode could be
:data:`ExecutionMode.PIPELINED`,
:data:`ExecutionMode.PIPELINED_FORCED`,
:data:`ExecutionMode.BATCH` or
:data:`ExecutionMode.BATCH_FORCED`.
"""
self._j_execution_config.setExecutionMode(execution_mode._to_j_execution_mode())
return self
def get_execution_mode(self) -> 'ExecutionMode':
"""
Gets the execution mode used to execute the program. The execution mode defines whether
        data exchanges are performed in a batch or in a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
.. seealso:: :func:`set_execution_mode`
:return: The execution mode for the program.
"""
j_execution_mode = self._j_execution_config.getExecutionMode()
return ExecutionMode._from_j_execution_mode(j_execution_mode)
def set_default_input_dependency_constraint(
self, input_dependency_constraint: InputDependencyConstraint) -> 'ExecutionConfig':
"""
Sets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
Example:
::
>>> config.set_default_input_dependency_constraint(InputDependencyConstraint.ALL)
:param input_dependency_constraint: The input dependency constraint. The constraints could
be :data:`InputDependencyConstraint.ANY` or
:data:`InputDependencyConstraint.ALL`.
"""
self._j_execution_config.setDefaultInputDependencyConstraint(
input_dependency_constraint._to_j_input_dependency_constraint())
return self
def get_default_input_dependency_constraint(self) -> 'InputDependencyConstraint':
"""
Gets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
.. seealso:: :func:`set_default_input_dependency_constraint`
:return: The input dependency constraint of this job. The possible constraints are
:data:`InputDependencyConstraint.ANY` and :data:`InputDependencyConstraint.ALL`.
"""
j_input_dependency_constraint = self._j_execution_config\
.getDefaultInputDependencyConstraint()
return InputDependencyConstraint._from_j_input_dependency_constraint(
j_input_dependency_constraint)
def enable_force_kryo(self) -> 'ExecutionConfig':
"""
        Forces the TypeExtractor to use the Kryo serializer for POJOs even though they could be
        analyzed as POJOs. In some cases this might be preferable, for example, when using
        interfaces with subclasses that cannot be analyzed as POJOs.
"""
self._j_execution_config.enableForceKryo()
return self
def disable_force_kryo(self) -> 'ExecutionConfig':
"""
Disable use of Kryo serializer for all POJOs.
"""
self._j_execution_config.disableForceKryo()
return self
def is_force_kryo_enabled(self) -> bool:
"""
        :return: Boolean value that represents whether the usage of the Kryo serializer for all POJOs
is enabled.
"""
return self._j_execution_config.isForceKryoEnabled()
def enable_generic_types(self) -> 'ExecutionConfig':
"""
        Enables the use of generic types which are serialized via Kryo.
Generic types are enabled by default.
.. seealso:: :func:`disable_generic_types`
"""
self._j_execution_config.enableGenericTypes()
return self
def disable_generic_types(self) -> 'ExecutionConfig':
"""
Disables the use of generic types (types that would be serialized via Kryo). If this option
is used, Flink will throw an ``UnsupportedOperationException`` whenever it encounters
a data type that would go through Kryo for serialization.
Disabling generic types can be helpful to eagerly find and eliminate the use of types
that would go through Kryo serialization during runtime. Rather than checking types
individually, using this option will throw exceptions eagerly in the places where generic
types are used.
**Important:** We recommend to use this option only during development and pre-production
phases, not during actual production use. The application program and/or the input data may
be such that new, previously unseen, types occur at some point. In that case, setting this
option would cause the program to fail.
.. seealso:: :func:`enable_generic_types`
"""
self._j_execution_config.disableGenericTypes()
return self
def has_generic_types_disabled(self) -> bool:
"""
Checks whether generic types are supported. Generic types are types that go through Kryo
during serialization.
Generic types are enabled by default.
.. seealso:: :func:`enable_generic_types`
.. seealso:: :func:`disable_generic_types`
        :return: Boolean value that represents whether generic types are supported.
"""
return self._j_execution_config.hasGenericTypesDisabled()
def enable_auto_generated_uids(self) -> 'ExecutionConfig':
"""
        Enables the Flink runtime to auto-generate UIDs for operators.
.. seealso:: :func:`disable_auto_generated_uids`
"""
self._j_execution_config.enableAutoGeneratedUIDs()
return self
def disable_auto_generated_uids(self) -> 'ExecutionConfig':
"""
Disables auto-generated UIDs. Forces users to manually specify UIDs
on DataStream applications.
It is highly recommended that users specify UIDs before deploying to
production since they are used to match state in savepoints to operators
in a job. Because auto-generated ID's are likely to change when modifying
        a job, specifying custom IDs allows an application to evolve over time
without discarding state.
"""
self._j_execution_config.disableAutoGeneratedUIDs()
return self
def has_auto_generated_uids_enabled(self) -> bool:
"""
        Checks whether auto-generated UIDs are supported.
        Auto-generated UIDs are enabled by default.
        .. seealso:: :func:`enable_auto_generated_uids`
        .. seealso:: :func:`disable_auto_generated_uids`
        :return: Boolean value that represents whether auto-generated UIDs are supported.
"""
return self._j_execution_config.hasAutoGeneratedUIDsEnabled()
def enable_force_avro(self) -> 'ExecutionConfig':
"""
Forces Flink to use the Apache Avro serializer for POJOs.
**Important:** Make sure to include the *flink-avro* module.
"""
self._j_execution_config.enableForceAvro()
return self
def disable_force_avro(self) -> 'ExecutionConfig':
"""
Disables the Apache Avro serializer as the forced serializer for POJOs.
"""
self._j_execution_config.disableForceAvro()
return self
def is_force_avro_enabled(self) -> bool:
"""
        Returns whether Apache Avro is the default serializer for POJOs.
        :return: Boolean value that represents whether Apache Avro is the default serializer
                 for POJOs.
"""
return self._j_execution_config.isForceAvroEnabled()
def enable_object_reuse(self) -> 'ExecutionConfig':
"""
Enables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions. Keep in mind that this can lead to bugs when the
user-code function of an operation is not aware of this behaviour.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableObjectReuse()
return self
def disable_object_reuse(self) -> 'ExecutionConfig':
"""
Disables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions.
.. seealso:: :func:`enable_object_reuse`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableObjectReuse()
return self
def is_object_reuse_enabled(self) -> bool:
"""
Returns whether object reuse has been enabled or disabled.
.. seealso:: :func:`enable_object_reuse`
        :return: Boolean value that represents whether object reuse has been enabled or disabled.
"""
return self._j_execution_config.isObjectReuseEnabled()
def get_global_job_parameters(self) -> Dict[str, str]:
"""
Gets current configuration dict.
:return: The configuration dict.
"""
return dict(self._j_execution_config.getGlobalJobParameters().toMap())
def set_global_job_parameters(self, global_job_parameters_dict: Dict) -> 'ExecutionConfig':
"""
Register a custom, serializable user configuration dict.
Example:
::
>>> config.set_global_job_parameters({"environment.checkpoint_interval": "1000"})
:param global_job_parameters_dict: Custom user configuration dict.
"""
gateway = get_gateway()
Configuration = gateway.jvm.org.apache.flink.configuration.Configuration
j_global_job_parameters = Configuration()
for key in global_job_parameters_dict:
if not isinstance(global_job_parameters_dict[key], str):
value = str(global_job_parameters_dict[key])
else:
value = global_job_parameters_dict[key]
j_global_job_parameters.setString(key, value)
self._j_execution_config.setGlobalJobParameters(j_global_job_parameters)
return self
def add_default_kryo_serializer(self,
type_class_name: str,
serializer_class_name: str) -> 'ExecutionConfig':
"""
Adds a new Kryo default serializer to the Runtime.
Example:
::
>>> config.add_default_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
        :param type_class_name: The fully-qualified Java class name of the types serialized with
                                the given serializer.
        :param serializer_class_name: The fully-qualified Java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.addDefaultKryoSerializer(type_clz, j_serializer_clz)
return self
def register_type_with_kryo_serializer(self,
type_class_name: str,
serializer_class_name: str) -> 'ExecutionConfig':
"""
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
Example:
::
>>> config.register_type_with_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
        :param type_class_name: The fully-qualified Java class name of the types serialized with
                                the given serializer.
        :param serializer_class_name: The fully-qualified Java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.registerTypeWithKryoSerializer(type_clz, j_serializer_clz)
return self
def register_pojo_type(self, type_class_name: str) -> 'ExecutionConfig':
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_pojo_type("com.aaa.bbb.PojoClass")
        :param type_class_name: The fully-qualified Java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerPojoType(type_clz)
return self
def register_kryo_type(self, type_class_name: str) -> 'ExecutionConfig':
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_kryo_type("com.aaa.bbb.KryoClass")
        :param type_class_name: The fully-qualified Java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerKryoType(type_clz)
return self
def get_registered_types_with_kryo_serializer_classes(self) -> Dict[str, str]:
"""
Returns the registered types with their Kryo Serializer classes.
        :return: The dict whose keys are fully-qualified Java class names of the registered
                 types and whose values are fully-qualified Java class names of the Kryo
                 Serializer classes.
"""
j_clz_map = self._j_execution_config.getRegisteredTypesWithKryoSerializerClasses()
registered_serializers = {}
for key in j_clz_map:
registered_serializers[key.getName()] = j_clz_map[key].getName()
return registered_serializers
def get_default_kryo_serializer_classes(self) -> Dict[str, str]:
"""
Returns the registered default Kryo Serializer classes.
        :return: The dict whose keys are fully-qualified Java class names of the registered
                 types and whose values are fully-qualified Java class names of the default
                 Kryo Serializer classes.
"""
j_clz_map = self._j_execution_config.getDefaultKryoSerializerClasses()
default_kryo_serializers = {}
for key in j_clz_map:
default_kryo_serializers[key.getName()] = j_clz_map[key].getName()
return default_kryo_serializers
def get_registered_kryo_types(self) -> List[str]:
"""
Returns the registered Kryo types.
        :return: The list of fully-qualified Java class names of the registered Kryo types.
"""
j_clz_set = self._j_execution_config.getRegisteredKryoTypes()
return [value.getName() for value in j_clz_set]
def get_registered_pojo_types(self) -> List[str]:
"""
Returns the registered POJO types.
        :return: The list of fully-qualified Java class names of the registered POJO types.
"""
j_clz_set = self._j_execution_config.getRegisteredPojoTypes()
return [value.getName() for value in j_clz_set]
def is_auto_type_registration_disabled(self) -> bool:
"""
Returns whether Flink is automatically registering all types in the user programs with
Kryo.
:return: ``True`` means auto type registration is disabled and ``False`` means enabled.
"""
return self._j_execution_config.isAutoTypeRegistrationDisabled()
def disable_auto_type_registration(self) -> 'ExecutionConfig':
"""
Control whether Flink is automatically registering all types in the user programs with
Kryo.
"""
self._j_execution_config.disableAutoTypeRegistration()
return self
def is_use_snapshot_compression(self) -> bool:
"""
        Returns whether the compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:return: ``True`` means enabled and ``False`` means disabled.
"""
return self._j_execution_config.isUseSnapshotCompression()
def set_use_snapshot_compression(self, use_snapshot_compression: bool) -> 'ExecutionConfig':
"""
Control whether the compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:param use_snapshot_compression: ``True`` means enabled and ``False`` means disabled.
"""
self._j_execution_config.setUseSnapshotCompression(use_snapshot_compression)
return self
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self._j_execution_config == other._j_execution_config
def __hash__(self):
return self._j_execution_config.hashCode()
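# Hedged usage sketch (not part of the original module; assumes a working
# JVM gateway and that pyflink.datastream is importable), written in the
# doctest style used throughout the docstrings above:
#
#   >>> from pyflink.datastream import StreamExecutionEnvironment
#   >>> env = StreamExecutionEnvironment.get_execution_environment()
#   >>> config = env.get_config()
#   >>> _ = config.set_parallelism(4).enable_object_reuse()
#   >>> config.get_parallelism()
#   4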
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
from copy import copy
from twitter.common.collections import OrderedSet
from pants.base.exceptions import TaskError
from pants.base.target import Target
from pants.backend.core.tasks.task import Task
# TODO(benjy): The exclusives implementation needs re-doing. It's too clunky right now.
class CheckExclusives(Task):
"""Computes transitive exclusive maps.
This computes transitive exclusive tags for a dependency graph rooted
with a set of build targets specified by a user. If this process produces
any collisions where a single target contains multiple tag values for a single
exclusives key, then it generates an error and the compilation will fail.
See `Tag incompatibilities with exclusives
<build_files.html#howto-check-exclusives>`_
to see how folks use this.
Data about exclusives is provided to other tasks via data build products.
If the build data product 'exclusives_groups' is required, then an
ExclusivesMapping object will be created.
"""
@classmethod
def product_types(cls):
return ['exclusives_groups']
@classmethod
def register_options(cls, register):
super(CheckExclusives, cls).register_options(register)
register('--error-on-collision', default=True, action='store_true',
help='Abort the build if an exclusives collision is detected.')
def prepare(self, round_manager):
round_manager.require_data('java')
round_manager.require_data('scala')
def _compute_exclusives_conflicts(self, targets):
"""Compute the set of distinct chunks of targets that are required based on exclusives.
If two targets have different values for a particular exclusives tag,
then those targets must end up in different chunks.
This method computes the exclusives values that define each chunk.
e.g.: if target a has exclusives {"x": "1", "z": "1"}, target b has {"x": "2"},
target c has {"y", "1"}, and target d has {"y", "2", "z": "1"}, then we need to
divide into chunks on exclusives tags "x" and "y". We don't need to include
"z" in the chunk specification, because there are no conflicts on z.
Parameters:
targets: a list of the targets being built.
Return: the set of exclusives tags that should be used for chunking.
"""
exclusives_map = defaultdict(set)
for t in targets:
if t.exclusives is not None:
for k in t.exclusives:
exclusives_map[k] |= t.exclusives[k]
conflicting_keys = defaultdict(set)
for k in exclusives_map:
if len(exclusives_map[k]) > 1:
conflicting_keys[k] = exclusives_map[k]
return conflicting_keys
def execute(self):
targets = self.context.targets()
# compute transitive exclusives
for t in targets:
t._propagate_exclusives()
# Check for exclusives collision.
for t in targets:
excl = t.get_all_exclusives()
for key in excl:
if len(excl[key]) > 1:
msg = 'target %s has more than one exclusives tag for key %s: %s' % \
(t.address.reference(), key, list(excl[key]))
if self.get_options().error_on_collision:
raise TaskError(msg)
else:
print('Warning: %s' % msg)
if self.context.products.is_required_data('exclusives_groups'):
mapping = ExclusivesMapping(self.context)
partition_keys = self._compute_exclusives_conflicts(targets)
for key in partition_keys:
mapping.add_conflict(key, partition_keys[key])
mapping._populate_target_maps(targets)
self.context.products.safe_create_data('exclusives_groups', lambda: mapping)
class ExclusivesMapping(object):
def __init__(self, context):
self.context = context
self.conflicting_exclusives = {}
self.key_to_targets = defaultdict(set)
self.target_to_key = {}
self.ordering = None
self._group_classpaths = {} # key -> OrderedSet.
def add_conflict(self, key, values):
"""Register a conflict on an exclusives key.
Parameters:
      key: the exclusives key on which the conflict occurred.
      values: the different values used for that key across targets.
"""
self.conflicting_exclusives[key] = values
def get_targets_for_group_key(self, key):
"""Gets the set of targets that share exclusives.
Parameters:
key: a key, generated by _get_exclusives_key, for the exclusives
settings shared by a group of targets.
Return: the set of targets that share the exclusives settings. Returns
an empty set if no targets have that key.
"""
return self.key_to_targets[key]
def get_group_key_for_target(self, target):
""" Get the exclusives key for a target """
return self.target_to_key[target]
def get_group_keys(self):
"""Get the set of keys for all exclusives groups in the current build."""
if len(self.conflicting_exclusives) == 0:
return ["<none>"]
else:
return self.key_to_targets.keys()
def get_ordered_group_keys(self):
"""Compute the correct order in which to compile exclusives groups.
    In the 'group' task, we already do group-based ordering. But that ordering is done separately on
each exclusives group. If we have a grouping:
a(exclusives={x: 1, y:2}, dependencies=[ ':b', ':c' ])
b(exclusives={x:"<none>", y: "<none>"}, dependencies=[])
c(exclusives={x:<none>, y:2}, dependencies=[':b'])
If we were to do grouping in the exclusives ordering {x:<none>, y:2}, {x: <none>, y:<none>},
{x:1, y:2}, then we'd be compiling the group containing c before the group containing b; but
c depends on b.
"""
def number_of_emptys(key):
if key == "<none>":
return len(self.conflicting_exclusives)
return key.count("<none>")
if self.ordering is not None:
return self.ordering
# The correct order is from least exclusives to most exclusives - a target can only depend on
# other targets with fewer exclusives than itself.
    # Distinct list per bucket; '[[]] * n' would alias one list n times.
    keys_by_empties = [[] for _ in range(len(self.key_to_targets))]
# Flag to indicate whether there are any groups without any exclusives.
no_exclusives = False
for k in self.key_to_targets:
if k == "<none>":
no_exclusives = True
else:
keys_by_empties[number_of_emptys(k)].append(k)
result = [ ]
for i in range(len(keys_by_empties)):
for j in range(len(keys_by_empties[i])):
result.append(keys_by_empties[i][j])
if no_exclusives:
result.append("<none>")
result.reverse()
self.ordering = result
return self.ordering
def _get_exclusives_key(self, target):
# compute an exclusives group key: a list of the exclusives values for the keys
# in the conflicting keys list.
target_key = []
for k in self.conflicting_exclusives:
excl = target.exclusives if isinstance(target, Target) else target.declared_exclusives
if len(excl[k]) > 0:
target_key.append("%s=%s" % (k, list(excl[k])[0]))
else:
target_key.append("%s=<none>" % k)
if target_key == []:
return "<none>"
else:
return ','.join(target_key)
def _populate_target_maps(self, targets):
"""Populates maps of exclusive keys to targets, and vice versa."""
all_targets = set()
workqueue = copy(targets)
while len(workqueue) > 0:
t = workqueue.pop()
if t not in all_targets:
all_targets.add(t)
workqueue += t.dependencies
for t in all_targets:
key = self._get_exclusives_key(t)
if key == '':
raise TaskError('Invalid empty group key')
if key not in self._group_classpaths:
self._group_classpaths[key] = OrderedSet()
self.key_to_targets[key].add(t)
self.target_to_key[t] = key
def get_classpath_for_group(self, group_key):
"""Get the classpath to use for jvm compilations of a group.
Each exclusives group requires a distinct classpath. We maintain
them here as a map from the exclusives key to a classpath. The
classpath is updated during compilations to add the results of
compiling a group to the classpaths of other groups that could depend on it.
"""
if group_key not in self._group_classpaths:
self._group_classpaths[group_key] = OrderedSet()
# get the classpath to use for compiling targets within the group specified by group_key.
return list(reversed(self._group_classpaths[group_key]))
def _key_to_map(self, key):
result = {}
if key == '<none>' or key == '':
return result
pairs = key.split(',')
for p in pairs:
(k, v) = p.split("=")
result[k] = v
return result
def _is_compatible(self, mod_key, other_key):
# Check if a set of classpath modifications produced by compiling elements of the group
# specified by mod_key should be added to the classpath of other_key's group.
# A key is a list of comma separated name=value keys.
    # Keys match if and only if, for all pairs k=v1 from mod and k=v2 from other,
# either v1 == v2 or v1 == <none>.
mod_map = self._key_to_map(mod_key)
other_map = self._key_to_map(other_key)
for k in mod_map:
vm = mod_map[k]
vo = other_map[k]
if not (vm == vo or vm == "<none>"):
return False
return True
def update_compatible_classpaths(self, group_key, path_additions):
"""Update the classpath of all groups compatible with group_key, adding path_additions to their
classpath.
"""
additions = list(reversed(path_additions))
for key in self._group_classpaths:
if group_key is None or self._is_compatible(group_key, key):
group_classpath = self._group_classpaths[key]
group_classpath.update(additions)
def set_base_classpath_for_group(self, group_key, classpath):
# set the initial classpath of the elements of group_key to classpath.
self._group_classpaths[group_key] = OrderedSet(reversed(classpath))
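# A minimal standalone sketch (not part of the original task file; it assumes
# the pants imports above resolve): how exclusives keys map to dicts and how
# classpath compatibility between groups is decided.
if __name__ == '__main__':
  mapping = ExclusivesMapping(context=None)
  assert mapping._key_to_map('x=1,y=<none>') == {'x': '1', 'y': '<none>'}
  # '<none>' on the modifying group's side is compatible with any value...
  assert mapping._is_compatible('x=<none>,y=2', 'x=1,y=2')
  # ...but two concrete, differing values are not.
  assert not mapping._is_compatible('x=1,y=2', 'x=3,y=2')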
| |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import logging
import os
import platform
import re
import subprocess
from telemetry.internal.backends.chrome import minidump_symbolizer
from telemetry.internal.results import artifact_logger
# Directories relative to the build directory that may contain symbol binaries
# that can be dumped to symbolize a minidump.
_POSSIBLE_SYMBOL_BINARY_DIRECTORIES = [
'lib.unstripped',
os.path.join('android_clang_arm', 'lib.unstripped'),
os.path.join('android_clang_arm64', 'lib.unstripped'),
]
# Mappings from Crashpad/Breakpad processor architecture values to regular
# expressions that will match the output of running "file" on a .so compiled
# for that architecture.
# The Breakpad processor architecture values are hex representations of the
# values in MDCPUArchitecture from Breakpad's minidump_format.h.
_BREAKPAD_ARCH_TO_FILE_REGEX = {
# 32-bit ARM.
'0x5': r'.*32-bit.*ARM.*',
# 64-bit ARM.
'0xc': r'.*64-bit.*ARM.*',
}
# Line looks like " processor_architecture = 0xc ".
_PROCESSOR_ARCH_REGEX = r'\s*processor_architecture\s*\=\s*(?P<arch>\w*)\s*'
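# Quick illustrative check (a sketch, not part of the original module): the
# regex above pulls the architecture token out of a minidump_dump line shaped
# like the example in the comment.
_example_arch = re.match(_PROCESSOR_ARCH_REGEX, '  processor_architecture = 0xc ')
assert _example_arch is not None and _example_arch.group('arch') == '0xc'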
class AndroidMinidumpSymbolizer(minidump_symbolizer.MinidumpSymbolizer):
def __init__(self, dump_finder, build_dir, symbols_dir=None):
"""Class for handling all minidump symbolizing code on Android.
Args:
dump_finder: The minidump_finder.MinidumpFinder instance that is being
used to find minidumps for the test.
build_dir: The directory containing Chromium build artifacts to generate
symbols from.
symbols_dir: An optional path to a directory to store symbols for re-use.
Re-using symbols will result in faster symbolization times, but the
provided directory *must* be unique per browser binary, e.g. by
including the hash of the binary in the directory name.
"""
# Map from minidump path (string) to minidump_dump output (string).
self._minidump_dump_output = {}
# Map from minidump path (string) to the directory that should be used when
# looking for symbol binaries (string).
self._minidump_symbol_binaries_directories = {}
# We use the OS/arch of the host, not the device.
super(AndroidMinidumpSymbolizer, self).__init__(
platform.system().lower(), platform.machine(), dump_finder, build_dir,
symbols_dir=symbols_dir)
def SymbolizeMinidump(self, minidump):
if platform.system() != 'Linux':
      logging.warning(
          'Cannot get Android stack traces unless running on a Linux host.')
return None
if not self._build_dir:
logging.warning(
'Cannot get Android stack traces without build directory.')
return None
return super(AndroidMinidumpSymbolizer, self).SymbolizeMinidump(minidump)
def GetSymbolBinaries(self, minidump):
"""Returns a list of paths to binaries where symbols may be located.
Args:
minidump: The path to the minidump being symbolized.
"""
libraries = self._ExtractLibraryNamesFromDump(minidump)
symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)
if not symbol_binary_dir:
return []
return [os.path.join(symbol_binary_dir, lib) for lib in libraries]
def GetBreakpadPlatformOverride(self):
return 'android'
def _ExtractLibraryNamesFromDump(self, minidump):
"""Extracts library names that may contain symbols from the minidump.
This is a duplicate of the logic in Chromium's
//build/android/stacktrace/crashpad_stackwalker.py.
Returns:
A list of strings containing library names of interest for symbols.
"""
default_library_name = 'libmonochrome.so'
minidump_dump_output = self._GetMinidumpDumpOutput(minidump)
if not minidump_dump_output:
logging.warning(
'Could not get minidump_dump output, defaulting to library %s',
default_library_name)
return [default_library_name]
library_names = []
    module_library_line_re = re.compile(r'[(]code_file[)]\s+= '
                                        r'"(?P<library_name>lib[^. ]+\.so)"')
in_module = False
for line in minidump_dump_output.splitlines():
line = line.lstrip().rstrip('\n')
if line == 'MDRawModule':
in_module = True
continue
if line == '':
in_module = False
continue
if in_module:
m = module_library_line_re.match(line)
if m:
library_names.append(m.group('library_name'))
if not library_names:
logging.warning(
'Could not find any library name in the dump, '
'default to: %s', default_library_name)
return [default_library_name]
return library_names
def _GetSymbolBinaryDirectory(self, minidump, libraries):
"""Gets the directory that should contain symbol binaries for |minidump|.
Args:
minidump: The path to the minidump being analyzed.
libraries: A list of library names that are within the minidump.
Returns:
A string containing the path to the directory that should contain the
symbol binaries that can be dumped to symbolize |minidump|. Returns None
if the directory is unable to be determined for some reason.
"""
if minidump in self._minidump_symbol_binaries_directories:
return self._minidump_symbol_binaries_directories[minidump]
# Get the processor architecture reported by the minidump.
arch = None
matcher = re.compile(_PROCESSOR_ARCH_REGEX)
for line in self._GetMinidumpDumpOutput(minidump).splitlines():
match = matcher.match(line)
if match:
arch = match.groupdict()['arch'].lower()
break
if not arch:
logging.error('Unable to find processor architecture for minidump %s',
minidump)
self._minidump_symbol_binaries_directories[minidump] = None
return None
if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:
logging.error(
'Unsupported processor architecture %s for minidump %s. This is '
'likely fixable by adding the correct mapping for the architecture '
'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',
arch, minidump)
self._minidump_symbol_binaries_directories[minidump] = None
return None
# Look for a directory that contains binaries with the correct architecture.
matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])
symbol_dir = None
for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:
possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)
if not os.path.exists(possible_symbol_dir):
continue
for f in os.listdir(possible_symbol_dir):
if f not in libraries:
continue
binary_path = os.path.join(possible_symbol_dir, f)
stdout = subprocess.check_output(
['file', binary_path], stderr=subprocess.STDOUT)
if matcher.match(stdout):
symbol_dir = possible_symbol_dir
break
if not symbol_dir:
logging.error(
          'Unable to find suitable symbol binary directory for architecture %s. '
'This is likely fixable by adding the correct directory to '
'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',
arch)
self._minidump_symbol_binaries_directories[minidump] = symbol_dir
return symbol_dir
def _GetMinidumpDumpOutput(self, minidump):
"""Runs minidump_dump on the given minidump.
Caches the result for re-use.
Args:
minidump: The path to the minidump being analyzed.
Returns:
A string containing the output of minidump_dump, or None if it could not
be retrieved for some reason.
"""
if minidump in self._minidump_dump_output:
logging.debug('Returning cached minidump_dump output for %s', minidump)
return self._minidump_dump_output[minidump]
dumper_path = os.path.join(self._build_dir, 'minidump_dump')
if not os.access(dumper_path, os.X_OK):
logging.warning('Cannot run minidump_dump because %s is not found.',
dumper_path)
return None
# Using subprocess.check_output with stdout/stderr mixed can result in
# errors due to log messages showing up in the minidump_dump output. So,
# use Popen and combine into a single string afterwards.
p = subprocess.Popen(
[dumper_path, minidump], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout + '\n' + stderr
if p.returncode != 0:
# Dumper errors often do not affect stack walkability, just a warning.
# It's possible for the same stack to be symbolized multiple times, so
# add a timestamp suffix to prevent artifact collisions.
now = datetime.datetime.now()
suffix = now.strftime('%Y-%m-%d-%H-%M-%S')
artifact_name = 'dumper_errors/%s-%s' % (
os.path.basename(minidump), suffix)
logging.warning(
'Reading minidump failed, but likely not actually an issue. Saving '
'output to artifact %s', artifact_name)
artifact_logger.CreateArtifact(artifact_name, stdout)
if stdout:
self._minidump_dump_output[minidump] = stdout
return stdout
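# A standalone sketch (not part of the original module): the MDRawModule
# parsing performed in _ExtractLibraryNamesFromDump, exercised against a
# fabricated fragment of minidump_dump output.
if __name__ == '__main__':
  _line = '(code_file)                     = "libmonochrome.so"'.lstrip()
  _code_file_re = re.compile(r'[(]code_file[)]\s+= "(?P<library_name>lib[^. ]+\.so)"')
  _m = _code_file_re.match(_line)
  assert _m and _m.group('library_name') == 'libmonochrome.so'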
| |
"""
jobs.py - pdf rasterization routines
Author
Sacha Zyto <sacha@csail.mit.edu>
License
Copyright (c) 2010-2012 Massachusetts Institute of Technology.
MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
"""
import sys,os
import datetime
if "" not in sys.path:
sys.path.append("")
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'nbsite.settings'
from django.conf import settings
from base import utils, models as M
import glob, json, pyPdf, shutil, re, random, string, logging
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from os.path import dirname, abspath
id_log = "".join([ random.choice(string.ascii_letters+string.digits) for i in xrange(0,10)])
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)s %(message)s', filename='/tmp/nb_utils_pdf_%s.log' % ( id_log,), filemode='a')
def process_file(id, res, scales, pdf_dir, img_dir, fmt):
#insert metadata if not there:
filename = "%s/%s" % (pdf_dir, id)
#this is where we test for good PDF:
numpages = 0
h = 0
w = 0
do_crop = True
ROTATE_KEY = "/Rotate"
try:
pdf_object = pyPdf.PdfFileReader(file(filename, "rb"))
if pdf_object.isEncrypted and pdf_object.decrypt("")==0:
print "PDF file encrypted with non-empty password: %s" % (filename,)
return False
numpages = pdf_object.getNumPages()
p = pdf_object.getPage(0)
box = p.trimBox
hm = int(p.mediaBox.getUpperRight_y())
wm = int(p.mediaBox.getUpperRight_x())
ht = int(box.getUpperRight_y() - box.getLowerLeft_y())
wt = int(box.getUpperRight_x() - box.getLowerLeft_x())
rotation = 0 if ROTATE_KEY not in p else int(p[ROTATE_KEY])
if wm<=wt or hm<=ht: #we have a doubt: use media_box
do_crop = False
w = wm
h = hm
else: #seems ok to use trimbox
w = wt
h = ht
except pyPdf.utils.PdfReadError:
print "PdfReadError for %s ! Aborting !!!" % (filename,)
return False
except:
print "OTHER PDF ERROR for %s - Skipping\nDetails: %s" % (filename,sys.exc_info()[0] )
return False
s = M.Source.objects.get(pk=id)
s.numpages = numpages
s.w = w
s.h = h
s.rotation = rotation
#version is set up somewhere else, so it doesn't get called multiple times...
s.save()
#if "100" not in scales:
# assert False, "100 should be in scales for resolution %s, which only contains %s " % (res,scales)
d_ref = 72
for i in xrange(0,numpages):
for scale in scales:
pageno = fmt % (i,)
density = (int(res)*int(scale))/100
output_dir = "%s/%s/%s" % (img_dir, res, scale)
output_file = ("%s_"+fmt+".png") % (id, i)
crop_params = " -crop %sx%s+%s+%s " % (w*density/d_ref, h*density/d_ref,box.getLowerLeft_x()*density/d_ref,box.getLowerLeft_y()*density/d_ref) if do_crop else ""
#now try w/ mu.pdf:
src = "%s/%s" % (pdf_dir, id)
cmd_rasterize = "nice pdfdraw -o %s/%s -r %s -b 8 %s %s" % (output_dir, output_file, density, src, (i+1))
#cmd = "nice convert -quality 100 %s -density %s %s/%s[%s] %s/%s/%s/%s_%s.png" % (crop_cmd, density, pdf_dir, id,i, img_dir, res,scale, id, pageno)
cmd_crop = "echo" if crop_params=="" else "nice convert -quality 100 %s -density %s %s/%s %s/%s" % (crop_params, density,output_dir, output_file, output_dir, output_file)
cmd = "(%s) && (%s)" % (cmd_rasterize, cmd_crop)
print cmd
retval = os.system(cmd)
return True
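def _density_example():
    # A worked sketch (not part of the original module) of the arithmetic used
    # in process_file: res=150 at scale=50 renders a 612x792pt (8.5x11in) page
    # at density 75. Integer division under Python 2 keeps everything integral.
    d_ref = 72
    density = (150 * 50) / 100
    assert density == 75
    assert 612 * density / d_ref == 637  # crop width in pixels (floor of 637.5)
    assert 792 * density / d_ref == 825  # crop height in pixels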
def regenerate_pdf_metadata(*t_args):
"""
Regenerate metadata info of existing PDFs in the database
usage: utils_pdf metadata start_at_id [alternate_repository_dir]
NOTE: This only takes the 1st page into consideration
"""
if len(t_args)>0:
args=t_args[0]
if (len(args)<1):
print "usage: utils_pdf metadata start_at_id [alternate_pdf_dir] "
return
start_at_id = args[0]
rep_dir = args[1] if len(args)==2 else "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
if not os.path.exists(rep_dir):
print "this repository dir doesn't exist: %s" % (rep_dir,)
return
import views
sources = M.Source.objects.filter(id__gte=int(start_at_id))
for source in sources:
print "BEGIN metadata id=%s" %(source.id,)
views.insert_pdf_metadata(source.id, rep_dir)
print "END metadata id=%s" %(source.id,)
print "ALL DONE."
def update_rotation(*t_args):
"""
Updates rotation info of existing PDFs in the database
usage: utils_pdf update_rotation start_at_id [alternate_repository_dir]
NOTE: This only takes the 1st page into consideration
"""
ROTATE_KEY = "/Rotate"
if len(t_args)>0:
args=t_args[0]
DB = db.Db()
if (len(args)<1):
print "usage: utils_pdf update_rotation start_at_id [alternate_pdf_dir] "
return
start_at_id = args[0] if len(args)>=1 else 1
rep_dir = args[1] if len(args)==2 else "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
if not os.path.exists(rep_dir):
print "this repository dir doesn't exist: %s" % (rep_dir,)
return
rows = DB.getRows("select id from source where id >= ? order by id", (start_at_id, ))
for r in rows:
id = r[0]
filename = "%s/%s" % (rep_dir, id)
if os.path.exists(filename):
try:
pdf_object = pyPdf.PdfFileReader(file(filename, "rb"))
if pdf_object.isEncrypted and pdf_object.decrypt("")==0:
print "PDF file encrypted with non-empty password: %s" % (filename,)
return False
p = pdf_object.getPage(0)
if ROTATE_KEY in p:
r = int(p[ROTATE_KEY])
#print "found rotation=%d in %s" % (r, id)
DB.doTransaction("update pdf_data set rotation=? where id_source = ?", (r, id))
print id,
except pyPdf.utils.PdfReadError:
print "\nPdfReadError for %s - Skipping" % (filename,)
except:
print "\nOTHER ERROR for %s - Skipping\nDetails: %s" % (filename,sys.exc_info()[0] )
else:
print "\n%s not in repository" %(filename, )
def update_dims(*t_args):
"""
Updates dimensions of existing PDFs in the database
usage: utils_pdf update_dims start_at_id [alternate_repository_dir]
NOTE: This only takes media_box into consideration (so that we keep the compatibility w/ existing annotations)
"""
print "DO NOT USE w/ pdfs uploaded after 20111117, as this will break the compatibility w/ existing annotations (since we've switched between mediabox and trimbox on 20111117)"
return
if len(t_args)>0:
args=t_args[0]
DB = db.Db()
if (len(args)<1):
print "usage: utils_pdf update_dims start_at_id [alternate_pdf_dir] "
return
start_at_id = args[0] if len(args)>=1 else 1
rep_dir = args[1] if len(args)==2 else "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
if not os.path.exists(rep_dir):
print "this repository dir doesn't exist: %s" % (rep_dir,)
return
rows = DB.getRows("select id from source where id >= ? order by id", (start_at_id, ))
for r in rows:
id = r[0]
filename = "%s/%s" % (rep_dir, id)
if os.path.exists(filename):
try:
pdf_object = pyPdf.PdfFileReader(file(filename, "rb"))
if pdf_object.isEncrypted and pdf_object.decrypt("")==0:
print "PDF file encrypted with non-empty password: %s" % (filename,)
return False
p = pdf_object.getPage(0)
box = p.trimBox
#h = int(box.getUpperRight_y() - box.getLowerLeft_y())
#w = int(box.getUpperRight_x() - box.getLowerLeft_x())
h = int(p.mediaBox.getUpperRight_y())
w = int(p.mediaBox.getUpperRight_x())
DB.doTransaction("update pdf_data set nrows=? ,ncols=? where id_source = ?", (h, w, id))
print id,
except pyPdf.utils.PdfReadError:
print "\nPdfReadError for %s - Skipping" % (filename,)
except:
print "\nOTHER ERROR for %s - Skipping" % (filename, )
else:
print "\n%s not in repository" %(filename, )
def file_update(*t_args):
"""
    Updates an existing file with a new file.
"""
if len(t_args)>0:
args=t_args[0]
if len(args)<2:
print "usage: utils_pdf file_update id_source filename"
return
id_source = args[0]
filename = args[1]
#copy old file
rep_dir = "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
archive_dir = "%s/%s" % (rep_dir, "archive")
if not os.path.exists(archive_dir):
os.mkdir(archive_dir)
o = M.Ownership.objects.get(source__id=id_source)
o.published = datetime.datetime.now()
o.save()
shutil.move("%s/%s" % (rep_dir, id_source), "%s/%s_%s" % (archive_dir, id_source, o.source.version))
shutil.copy2(filename,"%s/%s" % (rep_dir, id_source))
regenerate_file((id_source,))
def split_chapters(*t_args):
"""
Split a large pdf into chunks (i.e. chapters)
"""
if len(t_args)>0:
args=t_args[0]
if len(args)<1:
print "usage: utils_pdf split_chapters configfile"
return
from pyPdf import PdfFileWriter, PdfFileReader
f = open(args[0])
P = json.loads(f.read())
f.close()
input = PdfFileReader(file(P["source"], "rb"))
i0 = P["first_chapter_index"]
ends = P["chapters_ends"]
for i in xrange(0, len(ends)):
ch_num = i0+i
fmt = P["chapter_fmt"] % (ch_num, )
output = PdfFileWriter()
if not os.path.exists(P["outputdir"]):
os.mkdir( P["outputdir"])
fn_out = "%s/%s%s" % (P["outputdir"], P["chapter_prefix"], fmt)
j0 = P["firstpage"] if i==0 else ends[i-1]
for j in xrange(j0, ends[i]):
output.addPage(input.getPage(j))
outputStream = file(fn_out, "wb")
output.write(outputStream)
outputStream.close()
print "wrote %s" % (fn_out,)
def upload_chapters(*t_args):
"""
upload chapters in a bulk fashion
"""
if len(t_args)>0:
args=t_args[0]
if len(args)<1:
print "usage: utils_pdf upload_chapters configfile"
return
DB = db.Db()
f = open(args[0])
P = json.loads(f.read())
f.close()
i0 = P["first_chapter_index"]
ends = P["chapters_ends"]
fmt2 = settings.IMG_FMT_STRING
resolutions = P["RESOLUTIONS"] if "RESOLUTIONS" in P else settings.RESOLUTIONS
rep_dir = "%s/%s" % (settings.HTTPD_MEDIA,settings.RESTRICTED_REPOSITORY_DIR) if P["restricted"] else "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
cache_dir = "%s/%s" % (settings.HTTPD_MEDIA_CACHE,settings.CACHE_DIR)
for i in xrange(0, len(ends)):
ch_num = i0+i
fmt = P["chapter_fmt"] % (ch_num, )
title = "%s%s" % ( P["chapter_prefix"], fmt)
fn = "%s/%s" % (P["outputdir"], title)
id_source = DB.getVal(""" select nextval('source_id_seq') """,())
DB.doTransaction("""insert into source(id, scheme,dn,port, path, query, submittedby) values (?, ?, ?, ?, ?, ?, ?)""", (id_source, "http", "localhost","8000","/%s" %(title,), "id_ensemble=%s" % (P["id_ensemble"], ), None))
DB.doTransaction("insert into ownership(id_source, id_ensemble) values (?,?)", (id_source,P["id_ensemble"]))
shutil.copy2(fn,"%s/%s" % (rep_dir, id_source))
for res in resolutions:
            # Drop the stray DB argument: process_file's signature takes six parameters.
            if process_file(id_source, res, resolutions[res], rep_dir, cache_dir, fmt2):
print "%s: success ! " % (id_source, )
else:
print "%s: failed ! " % (id_source, )
def process_next(args=[]):
#are there any running tasks ?
in_process = M.Processqueue.objects.filter(started__isnull=False, completed=None)
if in_process.count() == 0: #queue is available for processing
#get 1st task that needs to be run
tasks = M.Processqueue.objects.filter(started=None)
if tasks.count()==0:
print "process_next - nothing to do"
return
task = tasks[0]
task.started=datetime.datetime.now()
task.save()
id_source = task.source_id
resolutions={}
if len(args)>1:
resolutions[args[1]]={"100": None}
else:
resolutions = settings.RESOLUTIONS
rep_dir = "%s/%s" % (settings.HTTPD_MEDIA,settings.REPOSITORY_DIR)
cache_dir = "%s/%s" % (settings.HTTPD_MEDIA_CACHE,settings.CACHE_DIR)
fmt = settings.IMG_FMT_STRING
for res in resolutions:
if not os.path.exists( "%s/%s" % (cache_dir, res)):
os.mkdir( "%s/%s" % (cache_dir, res))
for scale in resolutions[res]:
if not os.path.exists( "%s/%s/%s" % (cache_dir, res, scale)):
os.mkdir( "%s/%s/%s" % (cache_dir, res, scale))
print "about to regenerate %s" %(id_source,)
if not process_file(id_source, res, resolutions[res],rep_dir, cache_dir, fmt):
print "Error happened with pdf: deleting %d from records " %(id_source,)
V = {"reply_to": settings.SMTP_REPLY_TO,
"email": task.source.submittedby.email,
"source_id": task.source.id,
"title": task.source.title,
"submitted": task.submitted,
"support": settings.SUPPORT_LINK,
"contact_email": settings.NBTEAM_EMAIL,
"firstname": task.source.submittedby.firstname
}
task.delete()
M.Ownership.objects.get(source__id=id_source).delete()
M.Source.objects.get(pk=id_source).delete()
msg = render_to_string("email/msg_pdferror",V)
email = EmailMessage("NB was unable to read a PDF file that you've submitted",
msg,
settings.EMAIL_FROM,
(V["email"], settings.SMTP_CC_PDFERROR ),
(settings.EMAIL_BCC, ))
email.send()
print msg
return
#mark that processing is done:
task.completed = datetime.datetime.now()
task.save()
s = task.source
s.version = s.version+1
s.save()
V = {"reply_to": settings.SMTP_REPLY_TO,
"email": task.source.submittedby.email,
"title": task.source.title,
"submitted": task.submitted,
"protocol": settings.PROTOCOL,
"hostname": settings.HOSTNAME,
"id_source": id_source,
"firstname": task.source.submittedby.firstname
}
msg = render_to_string("email/msg_pdfdone",V)
email = EmailMessage(
"The PDF file that you've submitted is now ready on NB.",
msg,
settings.EMAIL_FROM,
(V["email"], settings.SMTP_CC_USER ),
(settings.EMAIL_BCC, ))
email.send()
try:
print msg
except UnicodeEncodeError:
print "not displaying msg b/c of unicode issues"
#are there still tasks needing to be run ?
tasks = M.Processqueue.objects.filter(started=None)
if tasks.count() != 0:
new_id_source = tasks[0].source.id
basedir = dirname(dirname(abspath(__file__)))
cmd = "(cd %s; python -m upload.jobs file_img %s >> /tmp/uploadscript.log 2>&1 &)" %(basedir, new_id_source)
logging.info(cmd)
os.system(cmd)
else:
print "PDF queue is now empty"
else:
print "PDF task already running (%s)." % (in_process[0].id, )
def regenerate_file(*t_args):
"""
(re)-generates the images for a given file
"""
if len(t_args)>0:
args=t_args[0]
if len(args)==0:
print "Missing id_source"
return
id_source = args[0]
#Add new source to the queue if not present or present and finished:
o = M.Processqueue.objects.filter(source__id=id_source, completed=None)
if o.count()==0:
o = M.Processqueue(source_id=id_source)
o.save()
process_next(args)
if __name__ == "__main__" :
ACTIONS = {
"file_img": regenerate_file,
"file_update": file_update,
"update_dims": update_dims,
"split_chapters": split_chapters,
"upload_chapters": upload_chapters,
"update_rotation": update_rotation,
"metadata": regenerate_pdf_metadata
}
utils.process_cli(__file__, ACTIONS)
| |
#!/usr/bin/env python3
# *************************************************************
#
# The OpenTRV project licenses this file to you
# under the Apache Licence, Version 2.0 (the "Licence");
# you may not use this file except in compliance
# with the Licence. You may obtain a copy of the Licence at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Licence is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Licence for the
# specific language governing permissions and limitations
# under the Licence.
#
# *************************************************************
# Author(s) / Copyright (s): Deniz Erbilgin 2016
# Mark Hill 2016
import RPi.GPIO as GPIO
import time
import serial as ser
import csv
import sys
### VERSION NUMBER.
CONFIG_REV7_VERSION = 4
### CONSTANTS
pin_REV7 = 11 ## REV7 power pin. Uses GPIO in board mode (i.e. the Pi header pin number rather than the chip pin number)
RESET_TIME = 9 ## Number of seconds to keep REV7 unpowered when resetting.
# power on REV7
def powerOn():
# print("Power on REV7")
GPIO.output(pin_REV7, GPIO.LOW)
#power off REV7
def powerOff():
# print("Power off REV7")
GPIO.output(pin_REV7, GPIO.HIGH)
# wait for post and initial txs to finish
def waitForCLI(dev):
counter = 0
dev.write(b'\n')
string = dev.readline()
while (string != b'>\r\n') and (counter < 5):
print(string)
dev.write(b'\n')
string = dev.readline()
counter = counter + 1
def detect_USB0_is_REV7(dev, post):
""" Find out what serial device the REV7 is connected to.
Restarts REV7 and checks if POST is as expected.
:param dev: pyserial instance to check for REV7.
:param post: String containing the POST.
:return: False if posts match, else True.
"""
powerOff()
time.sleep(RESET_TIME) # (DE20161118) Increased to ensure REV7 shuts down correctly.
dev.flushInput()
powerOn()
time.sleep(0.5)
dev.readline()
line = dev.readline()
if line.startswith(post):
print("switching ports: rev11 detected: " + str(line))
return False
else:
return True
# power cycle REV7
def powerCycle(dev, post):
powerOff()
time.sleep(RESET_TIME) # (DE20161118) Increased to ensure REV7 shuts down correctly.
dev.flushInput()
powerOn()
time.sleep(0.5)
line1 = dev.readline()
print ("REV7: " + str(line1))
line2 = dev.readline()
print ("REV7: " + str(line2))
if line1.startswith(post) or line2.startswith(post):
print ("REV7 found OK: " + repr(line2))
return 1
else:
print("*********************************REV7 not found")
print("********* CHECK BATTERY PACK TURNED ON ! ******")
powerOff()
GPIO.cleanup()
exit()
# setup REV7 power pin
def setup():
print("-----------------Setup REV7 power pin")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_REV7, GPIO.OUT)
GPIO.output(pin_REV7, GPIO.HIGH)
# close and free REV7 power pin
def end():
print("---------------------Close and free REV7 power pin")
powerOff()
GPIO.cleanup()
# Wait for prompt character
def sendCmd(dev, buf):
    while dev.read() != b'>':
        pass
dev.write(buf + b'\n')
# get value from csv
def getKey(myfile, serNo):
with open(myfile, 'r', newline='') as keyfile:
keys = csv.reader(keyfile, delimiter=',')
for row in keys:
if serNo in row:
print(": ".join(row))
return row[1]
return 0
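def _getKey_example():
    # A sketch (not part of the original script; serial numbers and keys are
    # hypothetical): getKey returns the second CSV column for the row that
    # contains the serial number, or 0 when the serial number is absent.
    import os
    import tempfile
    with open(tempfile.mkstemp(suffix='.csv')[1], 'w') as f:
        f.write('7700,K B 00 11 22\n7701,K B AA BB CC\n')
        path = f.name
    try:
        assert getKey(path, '7701') == 'K B AA BB CC'
        assert getKey(path, '9999') == 0
    finally:
        os.remove(path)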
# write to output csv
def writeOut(myfile, serNo, key, id):
if serNo == 0:
return 0
else:
with open(myfile, 'a', newline = '') as outputfile:
outputcsv = csv.writer(outputfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
outputcsv.writerow([serNo, key, id])
return 1
# Set REV7 key
def setKey(dev, key):
    sendCmd(dev, key.encode('ascii')) ## key string must already carry its 'K B ' command prefix; sendCmd sends it verbatim
# need to be able to read back reply
# Get REV7 ID
def getID(dev):
sendCmd(dev, b'I')
print("REV7: " + repr(dev.readline()))
string = dev.readline()
print ("REV7 key decode attempt: " + repr(string))
print("REV7: " + repr(dev.readline()))
return string[4:27].decode()
# Clear REV7 node ID
def clearID(dev):
sendCmd(dev, b'I *')
print(dev.readlines(3))
# Clear REV11 node associations
def clearNodes(dev):
sendCmd(dev, b'A *')
print(dev.readlines(3))
# Set REV11 node association
def setNode(dev, node):
sendCmd(dev, b'A ' + node.encode())
print(dev.readlines(3))
# Set G 0 238
def setStartDelay(dev):
sendCmd(dev, b'G 0 238')
print(dev.readlines(3))
sendCmd(dev, b'G 0')
string = dev.readlines(6)
# print(string)
return string[1].decode()
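# A minimal sketch (not part of the original script) of the prompt protocol
# sendCmd relies on: the device CLI prints '>' when ready, then the command is
# written with a trailing newline. _FakeSerial is a stand-in for a pyserial port.
class _FakeSerial(object):
    def __init__(self):
        self.written = b''
        self._incoming = [b'x', b'>']  # a junk byte, then the prompt
    def read(self):
        return self._incoming.pop(0)
    def write(self, buf):
        self.written += buf
def _sendCmd_example():
    dev = _FakeSerial()
    sendCmd(dev, b'I')  # blocks until '>' is seen, then sends b'I\n'
    assert dev.written == b'I\n'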
# Main program
def main(argv):
port_REV7 = '/dev/ttyUSB0' ## REV7 serial connection
port_REV11 = '/dev/ttyUSB1' ## REV11 serial connection
baud_REV7 = 4800 ## REV7 baud
KEYFILE = 'rev7-7700-7999.csv' ## csv containing serial number and key associations
OUTPUTFILE = 'nodeAssociations.csv' ## csv to write serial number, key and node ID to
key_REV7 = ''
id_REV7 = ''
    serNo_REV7 = argv[0] ## gets serial number from CLI argument
post_REV11 = b'OpenTRV: board V0.2 REV11'
post_REV7 = b'OpenTRV: board V0.2 REV7' ## standard REV7 post
# options and stuff
# This will probably involve sys and getopt
# get key from csv
print("config_rev7_v%d\n\n" % CONFIG_REV7_VERSION)
print ("--------------------------------Getting key")
key_REV7 = getKey(KEYFILE, serNo_REV7)
print(key_REV7) ## todo should be deleted to prevent people seeing key?
print ("--------------------------------Open REV11 serial port")
rev11 = ser.Serial(port_REV11, baud_REV7, timeout=2)
print ("--------------------------------Open REV7 serial port")
rev7 = ser.Serial(port_REV7, baud_REV7, timeout=2) ## serial port
setup()
if detect_USB0_is_REV7(rev7, post_REV11):
print ("------->> REV11 is not connected to USB0")
else:
print ("------->> REV11 is connected to USB0; switching port order")
rev_temp = rev7
rev7 = rev11
rev11 = rev_temp
# check for REV7
print ("--------------------------------Checking for REV7")
print(powerCycle(rev7, post_REV7))
waitForCLI(rev7)
# Set start delay
print("+++++++++++++++++++++Setting start delay")
print("Result: " + repr(setStartDelay(rev7)))
# Reset ID
print("+++++++++++++++++++++Clearing ID")
clearID(rev7)
# get ID
print("++++++++++++++++++++++Getting ID")
id_REV7 = getID(rev7)
if len(id_REV7) < 20:
id_REV7 = getID(rev7)
if len(id_REV7) < 20:
id_REV7 = getID(rev7)
if len(id_REV7) < 20:
print("*********************************Could not get REV7 ID!")
powerOff()
GPIO.cleanup()
exit()
print(id_REV7)
# set key on REV7
print("++++++++++++++++++++++Setting Key")
setKey(rev7, key_REV7)
print(rev7.readlines(5))
print("++++++++++++++++++++++power cycle")
print(powerCycle(rev7, post_REV7))
waitForCLI(rev7)
setKey(rev7, key_REV7)
line = rev7.readlines(1)
print ("REV7: set key, output is: " + repr(line))
if line[0][:-2] != key_REV7.encode('ascii'):
print ("want:" + repr(key_REV7.encode('ascii')))
print (" got:" + repr(line[0][:-2]))
print ("key does not match!")
exit()
print(rev7.readlines(5))
# Check key has stuck
print("++++++++++++++++++++++Verifying key set")
# 1. set key on REV11
print("---------------REV11 output--------------------")
print("---------------REV11 output--------------------")
print("---------------REV11 output--------------------")
print("---------------REV11 output--------------------")
print("---------------REV11 output--------------------")
rev11.flushInput()
setKey(rev11, key_REV7)
print(rev11.readlines(5))
clearNodes(rev11)
print(rev11.readlines(5))
setNode(rev11, id_REV7)
print(rev11.readlines(5))
# 2. reset REV7
print("=======================flush input REV11")
rev11.flushInput()
print("=======================power cycle REV7")
powerCycle(rev7, post_REV7)
# 3. wait for receive
print("=======================waiting for 7 lines/15 seconds REV11")
start_time = time.time()
lines_received = 0
match_found = False
rev7ID = id_REV7.replace(' ', '')
while (start_time + 15 > time.time()) and (lines_received < 30) and (match_found == False):
line = rev11.readlines(1)
line = line[0].decode()
print ("line: " + line, end='')
if rev7ID in line:
match_found = True
print ("<<<<match found>>>> " + line)
lines_received += 1
if match_found:
print("*****************Success!" + " " + serNo_REV7)
# output csv stuff
writeOut(OUTPUTFILE, serNo_REV7, key_REV7, id_REV7)
else:
print("!!!!!!!!!!!!!!!!!!!!!!!Failed!")
rev7.close()
end()
if __name__ == "__main__":
main(sys.argv[1:])
| |
# Copyright 2020 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Functions for plotting incidence, recruitment, and events in the Baseline Site Selection Tool."""
from bsst import colors_config as cc
import matplotlib as mpl
import numpy as np
import pandas as pd
from bsst import plot_utils
import warnings
from bsst import ville_config
# All functions here take an axis argument and modify it in place.
# Functions in plot_utils return values rather than mutating axes.
pd.plotting.register_matplotlib_converters() # need to run on this pandas version
def turn_spines_off(ax, list_of_spines=['bottom', 'top', 'left', 'right']):
"""Turn off sides of the bounding box.
Args:
ax: The axis instance to format
list_of_spines: A list of spines to turn off
"""
for spine in list_of_spines:
ax.spines[spine].set_visible(False)
def format_time_axis(ax, num_ticks=4, include_labels=True, date_format='%b'):
"""Formats the x axis to be time in datetime form, with num_ticks.
Args:
ax: The axis instance we want to format, we assume time is on the xaxis.
num_ticks: An int representing the number of ticks to include on the final image.
include_labels: Bool representing whether to include text labels
date_format: A string representing how to format the date. '%b' is month
only.
"""
  xlim = ax.get_xlim()  # renamed from 'range' to avoid shadowing the builtin
  step_size = (xlim[1] - xlim[0]) // (num_ticks - 1)
  ticks = xlim[0] + step_size * np.arange(num_ticks)  # ndarray, so .dtype below is safe
  dt_ticks = mpl.dates.num2date(ticks) if not np.issubdtype(ticks.dtype, np.datetime64) else ticks
labels = [pd.to_datetime(x).strftime(date_format) for x in dt_ticks]
ax.set_xticks(ticks=ticks)
if include_labels:
ax.set_xticklabels(labels=labels, rotation=30)
ax.set_xlabel('date')
else:
ax.set_xticklabels(labels=[], visible=False)
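def _format_time_axis_example():
  # A usage sketch (not part of the original module), assuming matplotlib is
  # available: plot three months of daily values, then label four date ticks.
  import matplotlib.pyplot as plt
  fig, ax = plt.subplots()
  dates = pd.date_range('2021-01-01', periods=90)
  ax.plot(dates, np.arange(90))
  format_time_axis(ax, num_ticks=4, date_format='%b-%d')
  plt.close(fig)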
def format_hist_time_axis(ax, bins, special_bins=[(0, '<'), (-2, '>'),
(-1, 'Did not\nsucceed')],
num_ticks=4, include_labels=True, date_format='%b-%d'):
"""Formats the x axis to be time in datetime form, with num_ticks.
Args:
ax: The axis instance we want to format, we assume time is on the xaxis.
bins: The bins used to plot the histogram
special_bins: A series of tuples with the first entry representing the
index of a bins with special values and the second entry
representing the label to give to the bin.
num_ticks: An int representing the number of ticks to include on the final image.
include_labels: Bool representing whether to include text labels
date_format: A string representing how to format the date. '%b' is month
only.
"""
  eligible_bins = np.delete(bins, [special_bin[0] for special_bin in special_bins])
  # Don't put a tick at the far right, as we need to see the 'Did not succeed' label
step_size = len(eligible_bins) // num_ticks
ticks = eligible_bins[[i * step_size for i in np.arange(num_ticks)]]
dt_ticks = mpl.dates.num2date(ticks) if not np.issubdtype(ticks.dtype, np.datetime64) else ticks
labels = [pd.to_datetime(x).strftime(date_format) for x in dt_ticks]
  ticks = np.append(ticks, bins[[special_bin[0] for special_bin in special_bins]])
  labels = np.append(labels, [special_bin[1] for special_bin in special_bins])
ax.set_xticks(ticks=ticks)
if include_labels:
ax.set_xticklabels(labels=labels, rotation=30)
ax.set_xlabel('date')
else:
ax.set_xticklabels(labels=[], visible=False)
def array_over_time(ax, array_to_plot, first_plot_day=None, plot_kwargs={'color':'b', 'ls':'-'}):
"""Plot array_to_plot as a function of time.
If array has a `sample` or `scenario` dimension, then all samples will be plotted with a
low opacity (alpha) value.
Args:
ax: An axes instance to plot our data on.
array_to_plot: A xr.DataArray with a time dimension, and optionally a sample OR
scenario dimension
first_plot_day: Optional, a time coordinate indicating the first date to plot.
plot_kwargs: Optional, a dictionary with keyword arguments to pass to matplotlib.plot
"""
time_dim = plot_utils.find_time_dim(array_to_plot)
shaped_data = array_to_plot.transpose(time_dim, ...)
if first_plot_day in array_to_plot.coords[time_dim].values:
data = shaped_data.sel({time_dim:slice(first_plot_day, None)})
else:
data = shaped_data
if any(item in data.dims for item in ['sample', 'scenario', 'sample_flattened']):
alpha = 0.1
else:
alpha = 1.0
ax.plot(data[time_dim], data.values, **plot_kwargs, alpha=alpha)
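def _array_over_time_example():
  # A usage sketch (not part of the original module). Assumes xarray is
  # installed and that plot_utils.find_time_dim resolves the 'time' dimension;
  # the 20 samples are drawn faintly (alpha=0.1) because of the 'sample' dim.
  import matplotlib.pyplot as plt
  import xarray as xr
  fig, ax = plt.subplots()
  time = pd.date_range('2021-01-01', periods=30)
  data = xr.DataArray(np.random.rand(30, 20),
                      coords={'time': time}, dims=['time', 'sample'])
  array_over_time(ax, data, plot_kwargs={'color': 'k', 'ls': '-'})
  plt.close(fig)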
def cum_control_events(ax, control_events, first_plot_day, color, linestyle):
"""Plot cumulative control arm events over time or historical_time.
Args:
ax: The axis instance we want to plot on.
control_events: The xr.DataArray that we want to plot. Must have either
'time' OR 'historical_time' dimension.
first_plot_day: An int representing the first date to plot.
color: A mpl color.
linestyle: A mpl linestyle.
"""
time_dim = plot_utils.find_time_dim(control_events)
cum_events = control_events.cumsum(time_dim)
  array_over_time(ax, cum_events, first_plot_day, {'color': color, 'ls': linestyle})
ax.set_ylabel('Cumulative control events')
def incidence(ax, incidence, first_plot_day, color, linestyle):
"""Plot incidence over time or historical_time.
Args:
ax: The axis instance we want to plot on.
incidence: The xr.DataArray that we want to plot. Must have either
'time' OR 'historical_time' dimension.
first_plot_day: An int representing the first date to plot.
color: A mpl color.
linestyle: A mpl linestyle.
"""
array_over_time(ax, incidence, first_plot_day, {'color': color, 'ls': linestyle})
ax.set_ylabel('New cases / population')
def cum_recruits(ax, recruits, first_plot_day, color, linestyle):
"""Plot cumulative recruits over a time dimension.
Args:
ax: The axis instance we want to plot on.
recruits: The xr.DataArray that we want to plot. Must have either
'time' OR 'historical_time' dimension.
first_plot_day: An int representing the first date to plot.
color: A mpl color.
linestyle: A mpl linestyle.
"""
time_dim = plot_utils.find_time_dim(recruits)
cum_recruits = recruits.cumsum(time_dim)
array_over_time(ax, cum_recruits, first_plot_day, {'color': color, 'ls': linestyle})
ax.set_ylabel('Cumulative recruits')
def cum_subrecruits(ax, recruits, first_plot_day, color, linestyle):
"""Plot the cumulative sum of recruits to compare across many populations.
Args:
ax: A series of axis instances to plot on.
recruits: An xr.DataArray that representing the expected or
observed recruits. Must have a time dimension.
first_plot_day: An int representing the first date to plot.
color: A mpl color.
linestyle: A mpl linestyle.
"""
sel_recruits = plot_utils.unpack_participant_labels(recruits)
labels_to_plot = plot_utils.get_labels_to_plot(recruits)
num_plots = len(ax)
for i, label in enumerate(labels_to_plot):
a = ax[i]
participants = sel_recruits.sel(participant_label=label, drop=True)
a.set_title(label)
time_dim = plot_utils.find_time_dim(participants)
array_over_time(a, participants.cumsum(time_dim), first_plot_day,
{'color': color, 'ls': linestyle})
if i in [num_plots-2, num_plots-1]:
format_time_axis(a, 3, date_format='%b-%d')
else:
format_time_axis(a, 3, include_labels=False)
def recruits(dim_to_plot, ax, sorted_recruits, color, linestyle='-', label=None):
"""Plot the recruits as a histogram over dim_to_plot.
Args:
dim_to_plot: A string representing the sorted_recruits.dim to plot
along the x-axis.
ax: An axes instance to plot our data on.
sorted_recruits: A xr.DataArray representing the recruits to plot
where <dim> has been sorted into the desired display order.
color: A mpl color to use as the edgecolor
linestyle: A mpl linestyle
label: A string used as a plot label
"""
dims_to_sum = list(sorted_recruits.dims)
dims_to_sum.remove(dim_to_plot)
thc = cc.TRANSPARENT_HIST_COLOR
bh = cc.BAR_HEIGHT
lw = cc.LINE_WIDTH
sum_rec = sorted_recruits.sum(dims_to_sum)
ax.barh(sum_rec[dim_to_plot], sum_rec, height=bh, fc=color, ec=thc,
alpha=0.3, ls=linestyle, lw=lw, label=label)
def recruit_diffs(dim_to_plot, ax, sorted_recruits, recruits_left,
zero_left_edge=False):
"""Plot the difference between two sets of recruits.
Places vertical lines at the actual recruitment value. Color maps and bar
height read from colors_config.
Args:
dim_to_plot: A string representing the sorted_recruits.dim to plot
along the x-axis.
ax: An axes instance to plot our data on.
sorted_recruits: A xr.DataArray representing the recruits to plot as the
right edge of the bar chart.
where <dim> has been sorted into the desired display order.
recruits_left: A xr.DataArray representing the recruits to plot as the
left edge of the bar chart.
zero_left_edge: A boolean. If True, we plot the left edge at 0.
"""
dims_to_sum = list(sorted_recruits.dims)
dims_to_sum.remove(dim_to_plot)
sum_rec_right = sorted_recruits.sum(dims_to_sum)
sum_rec_left = recruits_left.sum(dims_to_sum)
cmap = cc.BAR_CHART_CMAP
norm = cc.BAR_CHART_NORM
bh = cc.BAR_HEIGHT
ax.set_facecolor(cc.BAR_CHART_FACECOLOR)
# Sort the left edges to match the right edges
ydim = sum_rec_right.dims[0]
sorted_rec_left = sum_rec_left.sel({ydim: sorted_recruits[ydim]})
diff = sum_rec_right - sorted_rec_left
if not zero_left_edge:
bar_plot = ax.barh(diff[ydim], diff, left=sorted_rec_left,
color=cmap(norm(diff.values)), height=bh)
# Add vertical lines at the left-most edges
lh = cc.VLINE_HEIGHT
lc = cc.VLINE_COLOR
# ycoord is lower left of box
ycoords = np.asarray([i.xy[1] for i in bar_plot.get_children()])
lower_lim = ycoords - .5 * (lh - bh)
ax.vlines(sum_rec_right, lower_lim, lower_lim + lh, lc, ls='dashed')
else:
ax.barh(diff[ydim], diff, left=None, color=cmap(norm(diff.values)),
height=bh)
# Add a line at 0 to guide the eye
ax.axvline(color='#000000', lw=1.0)
def tts(ax, events, efficacy, color, linestyle):
"""Plot the time to success distributions.
Args:
ax: The axis instance we want to plot on.
events: An xr.DataArray representing the number of events in
our control arm. Has dimensions (time, location, scenario)
efficacy: A float representing the assumed vaccine efficacy.
color: A mpl color for the bar faces
linestyle: A mpl linestyle for the bar edges
"""
lw = cc.LINE_WIDTH
thc = cc.TRANSPARENT_HIST_COLOR
ax.set_facecolor(thc)
hist, bins = plot_utils.make_tts_hist(events, efficacy)
bw = bins[1] - bins[0]
ax.bar(bins[:-1], hist, width=bw, align='edge',
fc=color, ec=thc, ls=linestyle, lw=lw, alpha=0.3)
format_hist_time_axis(ax, bins[:-1], date_format='%b-%d')
ax.axvline(x=bins[-2], color='#656565', lw=1.0, ls='--')
def tts_diff(ax, proposed_events, baseline_events, efficacy):
"""Plot the difference in time to success distributions.
Args:
ax: The axis instance we want to plot on.
proposed_events: An xr.DataArray representing the number of events in
our control arm. Has dimensions (time, location, scenario)
baseline_events: An xr.DataArray representing the baseline number of
control events. This becomes the bottom edge of the barh plot.
Has dimensions (time, location, scenario)
efficacy: A float representing the assumed vaccine efficacy.
"""
ax.set_facecolor(cc.BAR_CHART_FACECOLOR)
cmap = cc.BAR_CHART_CMAP
norm = cc.DIFF_NORM
proposed_hist, proposed_bins = plot_utils.make_tts_hist(proposed_events, efficacy)
baseline_hist, baseline_bins = plot_utils.make_tts_hist(baseline_events, efficacy)
if np.any(proposed_bins[~np.isnan(proposed_bins)] != baseline_bins[~np.isnan(baseline_bins)]):
    warnings.warn('Proposed and baseline events have different times.')
diff = proposed_hist - baseline_hist
bw = proposed_bins[1] - proposed_bins[0]
ax.bar(proposed_bins[:-1], diff, width=bw, align='edge',
color=cmap(norm(diff)))
# Add a line at 0 to guide the eye
ax.axhline(color='#000000', lw=1.0)
# add line on the dns bin
ax.axvline(x=proposed_bins[-2], color='#656565', lw=1.0, ls='--')
# Format time axis here
format_hist_time_axis(ax, proposed_bins[:-1], date_format='%b-%d')
| |
import dis
import re
import sys
import textwrap
import unittest
from test.support import cpython_only
from test.bytecode_helper import BytecodeTestCase
class TestTransforms(BytecodeTestCase):
def test_unot(self):
        # UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
def unot(x):
if not x == 2:
del x
self.assertNotInBytecode(unot, 'UNARY_NOT')
self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE')
self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE')
def test_elim_inversion_of_is_or_in(self):
for line, cmp_op in (
('not a is b', 'is not',),
('not a in b', 'not in',),
('not a is not b', 'is',),
('not a not in b', 'in',),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'COMPARE_OP', cmp_op)
def test_global_as_constant(self):
# LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
def f():
x = None
x = None
return x
def g():
x = True
return x
def h():
x = False
return x
for func, elem in ((f, None), (g, True), (h, False)):
self.assertNotInBytecode(func, 'LOAD_GLOBAL')
self.assertInBytecode(func, 'LOAD_CONST', elem)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertNotInBytecode(f, 'LOAD_GLOBAL')
self.assertInBytecode(f, 'LOAD_CONST', None)
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotInBytecode(f, elem)
for elem in ('JUMP_ABSOLUTE',):
self.assertInBytecode(f, elem)
def test_pack_unpack(self):
# On PyPy, "a, b = ..." is even more optimized, by removing
# the ROT_TWO. But the ROT_TWO is not removed if assigning
# to more complex expressions, so check that.
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a[1], b = a, b', 'ROT_TWO',),
('a, b[2], c = a, b, c', 'ROT_THREE',),
):
code = compile(line,'','single')
self.assertInBytecode(code, elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.assertNotInBytecode(code, 'UNPACK_TUPLE')
def test_folding_of_tuples_of_constants(self):
# On CPython, "a,b,c=1,2,3" turns into "a,b,c=<constant (1,2,3)>"
# but on PyPy, it turns into "a=1;b=2;c=3".
for line, elem in (
('a = 1,2,3', '((1, 2, 3))'),
('("a","b","c")', "(('a', 'b', 'c'))"),
('a,b,c = 1,2,3', '((1, 2, 3))'),
('(None, 1, None)', '((None, 1, None))'),
('((1, 2), 3, 4)', '(((1, 2), 3, 4))'),
):
code = compile(line,'','single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# Long tuples should be folded too.
code = compile(repr(tuple(range(10000))),'','single')
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# One LOAD_CONST for the tuple, one for the None return value
load_consts = [instr for instr in dis.get_instructions(code)
if instr.opname == 'LOAD_CONST']
self.assertEqual(len(load_consts), 2)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
def test_folding_of_lists_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
('a in [1,2,3]', (1, 2, 3)),
('a not in ["a","b","c"]', ('a', 'b', 'c')),
('a in [None, 1, None]', (None, 1, None)),
('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_LIST')
def test_folding_of_sets_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
('a in {1,2,3}', frozenset({1, 2, 3})),
('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
('a in {None, 1, None}', frozenset({1, None})),
('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
):
code = compile(line, '', 'single')
self.assertNotInBytecode(code, 'BUILD_SET')
self.assertInBytecode(code, 'LOAD_CONST', elem)
# Ensure that the resulting code actually works:
def f(a):
return a in {1, 2, 3}
def g(a):
return a not in {1, 2, 3}
self.assertTrue(f(3))
self.assertTrue(not f(4))
self.assertTrue(not g(3))
self.assertTrue(g(4))
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', 9), # chained fold
('"@"*4', '@@@@'), # check string ops
('a="abc" + "def"', 'abcdef'), # check string ops
('a = 3**4', 81), # binary power
('a = 3*4', 12), # binary multiply
('a = 13//4', 3), # binary floor divide
('a = 14%4', 2), # binary modulo
('a = 2+3', 5), # binary add
('a = 13-4', 9), # binary subtract
('a = (12,13)[1]', 13), # binary subscr
('a = 13 << 2', 52), # binary lshift
('a = 13 >> 2', 3), # binary rshift
('a = 13 & 7', 5), # binary and
('a = 13 ^ 7', 10), # binary xor
('a = 13 | 7', 15), # binary or
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('BINARY_'))
# Verify that unfoldables are skipped
code = compile('a=2+"b"', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 2)
self.assertInBytecode(code, 'LOAD_CONST', 'b')
# Verify that large sequences do not result from folding
code = compile('a="x"*1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
    @cpython_only # we currently do not bother to implement that
def test_binary_subscr_on_unicode(self):
        # valid code gets optimized
code = compile('"foo"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 'f')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
code = compile('"\u0061\uffff"[1]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\uffff')
self.assertNotInBytecode(code,'BINARY_SUBSCR')
        # With PEP 393, non-BMP chars get optimized
code = compile('"\U00012345"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\U00012345')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
# invalid code doesn't get optimized
# out of range
code = compile('"fuu"[10]', '', 'single')
self.assertInBytecode(code, 'BINARY_SUBSCR')
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('-0.5', -0.5), # unary negative
('-0.0', -0.0), # -0.0
('-(1.0-1.0)', -0.0), # -0.0 after folding
('-0', 0), # -0
('~-2', 1), # unary invert
('+1', 1), # unary positive
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Check that -0.0 works after marshaling
def negzero():
return -(1.0-1.0)
        # Inspect negzero itself, not the 'code' left over from the loop above.
        for instr in dis.get_instructions(negzero):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Verify that unfoldables are skipped
for line, elem, opname in (
('-"abc"', 'abc', 'UNARY_NEGATIVE'),
('~"abc"', 'abc', 'UNARY_INVERT'),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertInBytecode(code, opname)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
self.assertNotInBytecode(f, 'LOAD_CONST', None)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 1)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
return true_value if cond else false_value
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 6)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
self.assertNotInBytecode(f, 'JUMP_FORWARD')
# There should be one jump for the while loop.
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'JUMP_ABSOLUTE']
self.assertEqual(len(returns), 1)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
def test_make_function_doesnt_bail(self):
def f():
def g()->1+1:
pass
return g
self.assertNotInBytecode(f, 'BINARY_ADD')
def test_constant_folding(self):
# Issue #11244: aggressive constant folding.
exprs = [
'3 * -5',
'-3 * 5',
'2 * (3 * 4)',
'(2 * 3) * 4',
'(-1, 2, 3)',
'(1, -2, 3)',
'(1, 2, -3)',
'(1, 2, -3) * 6',
'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}',
]
for e in exprs:
code = compile(e, '', 'single')
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.assertFalse(instr.opname.startswith('BINARY_'))
self.assertFalse(instr.opname.startswith('BUILD_'))
class TestBuglets(unittest.TestCase):
def test_bug_11510(self):
# folded constant set optimization was commingled with the tuple
# unpacking optimization which would fail if the set had duplicate
# elements so that the set length was unexpected
def f():
x, y = {1, 1}
return x, y
with self.assertRaises(ValueError):
f()
if __name__ == "__main__":
unittest.main()
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import sys
from pants.option.arg_splitter import GLOBAL_SCOPE, ArgSplitter
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_value_container import OptionValueContainer
from pants.option.parser_hierarchy import ParserHierarchy, enclosing_scope
from pants.option.scope import ScopeInfo
class Options(object):
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
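An illustrative sketch (the option name and default are made up):
  options.register('compile', '--foo-bar', default='baz')
  options.for_scope('compile.java').foo_bar  # resolved using the order above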
"""
class OptionTrackerRequiredError(Exception):
"""Options requires an OptionTracker instance."""
@classmethod
def complete_scopes(cls, scope_infos):
"""Expand a set of scopes to include all enclosing scopes.
E.g., if the set contains `foo.bar.baz`, ensure that it also contains `foo.bar` and `foo`.
"""
ret = {GlobalOptionsRegistrar.get_scope_info()}
for scope_info in scope_infos:
ret.add(scope_info)
original_scopes = {si.scope for si in scope_infos}
for scope_info in scope_infos:
scope = scope_info.scope
while scope != '':
if scope not in original_scopes:
ret.add(ScopeInfo(scope, ScopeInfo.INTERMEDIATE))
scope = enclosing_scope(scope)
return ret
@classmethod
def create(cls, env, config, known_scope_infos, args=None, bootstrap_option_values=None,
option_tracker=None,):
"""Create an Options instance.
:param env: a dict of environment variables.
:param config: data from a config file (must support config.get[list](section, name, default=)).
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
:param option_tracker: a :class:`pants.option.option_tracker.OptionTracker` instance
to record how option values were assigned.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos)
args = sys.argv if args is None else args
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
if not option_tracker:
raise cls.OptionTrackerRequiredError()
if bootstrap_option_values:
target_spec_files = bootstrap_option_values.target_spec_files
if target_spec_files:
for spec in target_spec_files:
with open(spec) as f:
target_specs.extend(filter(None, [line.strip() for line in f]))
help_request = splitter.help_request
parser_hierarchy = ParserHierarchy(env, config, complete_known_scope_infos, option_tracker)
values_by_scope = {} # Arg values, parsed per-scope on demand.
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker)
def __init__(self, goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker):
"""The low-level constructor for an Options instance.
Dependees should use `Options.create` instead.
"""
self._goals = goals
self._scope_to_flags = scope_to_flags
self._target_specs = target_specs
self._passthru = passthru
self._passthru_owner = passthru_owner
self._help_request = help_request
self._parser_hierarchy = parser_hierarchy
self._values_by_scope = values_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
self._option_tracker = option_tracker
@property
def tracker(self):
return self._option_tracker
@property
def help_request(self):
return self._help_request
@property
def target_specs(self):
"""The targets to operate on."""
return self._target_specs
@property
def goals(self):
"""The requested goals, in the order specified on the cmd line."""
return self._goals
@property
def known_scope_to_info(self):
return self._known_scope_to_info
@property
def scope_to_flags(self):
return self._scope_to_flags
def drop_flag_values(self):
"""Returns a copy of these options that ignores values specified via flags.
Any pre-cached option values are cleared and only option values that come from option defaults,
the config or the environment are used.
"""
# An empty scope_to_flags to force all values to come via the config -> env hierarchy alone
# and empty values in case we already cached some from flags.
no_flags = {}
no_values = {}
return Options(self._goals,
no_flags,
self._target_specs,
self._passthru,
self._passthru_owner,
self._help_request,
self._parser_hierarchy,
no_values,
self._bootstrap_option_values,
self._known_scope_to_info,
self._option_tracker)
def is_known_scope(self, scope):
"""Whether the given scope is known by this instance."""
return scope in self._known_scope_to_info
def passthru_args_for_scope(self, scope):
# Passthru args "belong" to the last scope mentioned on the command-line.
# Note: If that last scope is a goal, we allow all tasks in that goal to access the passthru
# args. This is to allow the more intuitive
# pants run <target> -- <passthru args>
# instead of requiring
# pants run.py <target> -- <passthru args>.
#
# However note that in the case where multiple tasks run in the same goal, e.g.,
# pants test <target> -- <passthru args>
# Then, e.g., both junit and pytest will get the passthru args even though the user probably
# only intended them to go to one of them. If the wrong one is not a no-op then the error will
# be unpredictable. However this is not a common case, and can be circumvented with an
# explicit test.pytest or test.junit scope.
if (scope and self._passthru_owner and scope.startswith(self._passthru_owner) and
(len(scope) == len(self._passthru_owner) or scope[len(self._passthru_owner)] == '.')):
return self._passthru
else:
return []
def register(self, scope, *args, **kwargs):
"""Register an option in the given scope, using argparse params."""
self.get_parser(scope).register(*args, **kwargs)
def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering argparse args on the given scope."""
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
# docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
return register
def get_parser(self, scope):
"""Returns the parser for the given scope, so code can register on it directly."""
return self._parser_hierarchy.get_parser_by_scope(scope)
def walk_parsers(self, callback):
self._parser_hierarchy.walk(callback)
def for_scope(self, scope):
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
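E.g., `self.for_scope('compile.java')` walks 'compile.java' -> 'compile' -> global,
reusing cached enclosing-scope values (illustrative).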
"""
# Short-circuit, if already computed.
if scope in self._values_by_scope:
return self._values_by_scope[scope]
# First get enclosing scope's option values, if any.
if scope == GLOBAL_SCOPE:
values = OptionValueContainer()
else:
values = copy.deepcopy(self.for_scope(enclosing_scope(scope)))
# Now add our values.
flags_in_scope = self._scope_to_flags.get(scope, [])
self._parser_hierarchy.get_parser_by_scope(scope).parse_args(flags_in_scope, values)
self._values_by_scope[scope] = values
for option in values:
self._option_tracker.record_option(scope=scope, option=option, value=values[option],
rank=values.get_rank(option))
return values
def registration_args_iter_for_scope(self, scope):
"""Returns an iterator over the registration arguments of each option in this scope.
See `Parser.registration_args_iter` for details.
"""
return self._parser_hierarchy.get_parser_by_scope(scope).registration_args_iter()
def get_fingerprintable_for_scope(self, scope):
"""Returns a list of fingerprintable (option type, option value) pairs for the given scope.
Fingerprintable options are options registered via a "fingerprint=True" kwarg.
"""
pairs = []
# Note that we iterate over options registered at `scope` and at all enclosing scopes, since
# option-using code can read those values indirectly via its own OptionValueContainer, so
# they can affect that code's output.
registration_scope = scope
while registration_scope is not None:
# This iterator will have already sorted the options, so their order is deterministic.
for (name, _, kwargs) in self.registration_args_iter_for_scope(registration_scope):
if kwargs.get('recursive') and not kwargs.get('recursive_root'):
continue # We only need to fprint recursive options once.
if kwargs.get('fingerprint') is not True:
continue
# Note that we read the value from scope, even if the registration was on an enclosing
# scope, to get the right value for recursive options (and because this mirrors what
# option-using code does).
val = self.for_scope(scope)[name]
val_type = kwargs.get('type', '')
pairs.append((val_type, val))
registration_scope = (None if registration_scope == ''
else enclosing_scope(registration_scope))
return pairs
def __getitem__(self, scope):
# TODO(John Sirois): Mainly supports use of dict<str, dict<str, str>> for mock options in tests,
# Consider killing if tests consolidate on using TestOptions instead of the raw dicts.
return self.for_scope(scope)
def bootstrap_option_values(self):
"""Return the option values for bootstrap options.
General code can also access these values in the global scope. But option registration code
cannot, hence this special-casing of this small set of options.
"""
return self._bootstrap_option_values
def for_global_scope(self):
"""Return the option values for the global scope."""
return self.for_scope(GLOBAL_SCOPE)
| |
"""
fs.mountfs
==========
Contains the MountFS class, a virtual filesystem that can have other filesystems linked in as branch directories.
For example, let's say we have two filesystems containing config files and resources respectively::
[config_fs]
|-- config.cfg
`-- defaults.cfg
[resources_fs]
|-- images
| |-- logo.jpg
| `-- photo.jpg
`-- data.dat
We can combine these filesystems into a single filesystem with the following code::
from fs.mountfs import MountFS
combined_fs = MountFS()
combined_fs.mountdir('config', config_fs)
combined_fs.mountdir('resources', resources_fs)
This will create a single filesystem where paths under `config` map to `config_fs`, and paths under `resources` map to `resources_fs`::
[combined_fs]
|-- config
| |-- config.cfg
| `-- defaults.cfg
`-- resources
|-- images
| |-- logo.jpg
| `-- photo.jpg
`-- data.dat
Now both filesystems can be accessed with the same path structure::
print combined_fs.getcontents('/config/defaults.cfg')
read_jpg(combined_fs.open('/resources/images/logo.jpg'))
"""
from fs.base import *
from fs.errors import *
from fs.path import *
from fs import _thread_synchronize_default
class DirMount(object):
def __init__(self, path, fs):
self.path = path
self.fs = fs
def __str__(self):
return "Mount point: %s"%self.path
class FileMount(object):
def __init__(self, path, open_callable, info_callable=None):
self.open_callable = open_callable
def no_info_callable(path):
return {}
self.info_callable = info_callable or no_info_callable
class MountFS(FS):
"""A filesystem that delegates to other filesystems."""
_meta = { 'virtual': True,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
}
DirMount = DirMount
FileMount = FileMount
def __init__(self, thread_synchronize=_thread_synchronize_default):
super(MountFS, self).__init__(thread_synchronize=thread_synchronize)
self.mount_tree = PathMap()
def __str__(self):
return "<MountFS>"
__repr__ = __str__
def __unicode__(self):
return unicode(self.__str__())
def _delegate(self, path):
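"""Resolve a path to a (fs, mount_path, delegate_path) triple.
Returns the filesystem responsible for `path`, the mount point that matched,
and the remainder of the path below that mount point. Mounted files and paths
that exist only in the mount tree resolve to (self, "/", path); unknown paths
resolve to (None, None, None).
"""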
path = abspath(normpath(path))
object = None
head_path = "/"
tail_path = path
for prefix in recursepath(path):
try:
object = self.mount_tree[prefix]
except KeyError:
pass
else:
head_path = prefix
tail_path = path[len(head_path):]
if type(object) is MountFS.DirMount:
return object.fs, head_path, tail_path
if type(object) is MountFS.FileMount:
return self, "/", path
try:
self.mount_tree.iternames(path).next()
except StopIteration:
return None, None, None
else:
return self, "/", path
def getsyspath(self, path, allow_none=False):
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
if allow_none:
return None
else:
raise NoSysPathError(path=path)
return fs.getsyspath(delegate_path, allow_none=allow_none)
def getpathurl(self, path, allow_none=False):
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
if allow_none:
return None
else:
raise NoPathURLError(path=path)
return fs.getpathurl(delegate_path, allow_none=allow_none)
@synchronize
def desc(self, path):
fs, mount_path, delegate_path = self._delegate(path)
if fs is self:
if fs.isdir(path):
return "Mount dir"
else:
return "Mounted file"
return "Mounted dir, maps to path %s on %s" % (delegate_path, str(fs))
@synchronize
def isdir(self, path):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
object = self.mount_tree.get(path, None)
return not isinstance(object,MountFS.FileMount)
return fs.isdir(delegate_path)
@synchronize
def isfile(self, path):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
object = self.mount_tree.get(path, None)
return isinstance(object,MountFS.FileMount)
return fs.isfile(delegate_path)
@synchronize
def exists(self, path):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
return True
return fs.exists(delegate_path)
@synchronize
def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
paths = self.mount_tree.names(path)
return self._listdir_helper(path,
paths,
wildcard,
full,
absolute,
dirs_only,
files_only)
else:
paths = fs.listdir(delegate_path,
wildcard=wildcard,
full=False,
absolute=False,
dirs_only=dirs_only,
files_only=files_only)
for nm in self.mount_tree.names(path):
if nm not in paths:
if dirs_only:
if self.isdir(pathjoin(path,nm)):
paths.append(nm)
elif files_only:
if self.isfile(pathjoin(path,nm)):
paths.append(nm)
else:
paths.append(nm)
if full or absolute:
if full:
path = relpath(normpath(path))
else:
path = abspath(normpath(path))
paths = [pathjoin(path, p) for p in paths]
return paths
@synchronize
def ilistdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
paths = self.mount_tree.names(path)
for path in self._listdir_helper(path,paths,wildcard,full,absolute,dirs_only,files_only):
yield path
else:
paths = fs.ilistdir(delegate_path,
wildcard=wildcard,
full=False,
absolute=False,
dirs_only=dirs_only,
files_only=files_only)
extra_paths = set(self.mount_tree.names(path))
if full:
pathhead = relpath(normpath(path))
def mkpath(p):
return pathjoin(pathhead,p)
elif absolute:
pathhead = abspath(normpath(path))
def mkpath(p):
return pathjoin(pathhead,p)
else:
def mkpath(p):
return p
for p in paths:
if p not in extra_paths:
yield mkpath(p)
for p in extra_paths:
if dirs_only:
if self.isdir(pathjoin(path,p)):
yield mkpath(p)
elif files_only:
if self.isfile(pathjoin(path,p)):
yield mkpath(p)
else:
yield mkpath(p)
@synchronize
def makedir(self, path, recursive=False, allow_recreate=False):
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" )
if not delegate_path:
if allow_recreate:
return
else:
raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize
def open(self, path, mode="r", **kwargs):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
callable = object.open_callable
return callable(path, mode, **kwargs)
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ResourceNotFoundError(path)
return fs.open(delegate_path, mode, **kwargs)
@synchronize
def setcontents(self, path, data, chunk_size=64*1024):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
return super(MountFS,self).setcontents(path, data, chunk_size=chunk_size)
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ParentDirectoryMissingError(path)
return fs.setcontents(delegate_path, data, chunk_size)
@synchronize
def createfile(self, path):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
return super(MountFS,self).createfile(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ParentDirectoryMissingError(path)
return fs.createfile(delegate_path)
@synchronize
def remove(self, path):
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise UnsupportedError("remove file", msg="Can only remove paths within a mounted dir")
return fs.remove(delegate_path)
@synchronize
def removedir(self, path, recursive=False, force=False):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path")
return fs.removedir(delegate_path, recursive, force)
@synchronize
def rename(self, src, dst):
fs1, mount_path1, delegate_path1 = self._delegate(src)
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is not fs2:
raise OperationFailedError("rename resource", path=src)
if fs1 is not self:
return fs1.rename(delegate_path1, delegate_path2)
path_src = normpath(src)
path_dst = normpath(dst)
object = self.mount_tree.get(path_src, None)
object2 = self.mount_tree.get(path_dst, None)
if object is None:
raise ResourceNotFoundError(src)
raise ResourceNotFoundError(src)
# TODO!
raise UnsupportedError("rename resource", path=src)
@synchronize
def move(self,src,dst,**kwds):
fs1, mount_path1, delegate_path1 = self._delegate(src)
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.move(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).move(src,dst,**kwds)
@synchronize
def movedir(self,src,dst,**kwds):
fs1, mount_path1, delegate_path1 = self._delegate(src)
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.movedir(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).movedir(src,dst,**kwds)
@synchronize
def copy(self,src,dst,**kwds):
fs1, mount_path1, delegate_path1 = self._delegate(src)
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.copy(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).copy(src,dst,**kwds)
@synchronize
def copydir(self,src,dst,**kwds):
fs1, mount_path1, delegate_path1 = self._delegate(src)
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.copydir(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).copydir(src,dst,**kwds)
@synchronize
def mountdir(self, path, fs):
"""Mounts a host FS object on a given path.
:param path: A path within the MountFS
:param fs: A filesystem object to mount
"""
self.mount_tree[path] = MountFS.DirMount(path, fs)
mount = mountdir
@synchronize
def mountfile(self, path, open_callable=None, info_callable=None):
"""Mounts a single file path.
:param path: A path within the MountFS
:param open_callable: A callable that returns a file-like object
:param info_callable: A callable that returns a dictionary with information regarding the file-like object
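Example (illustrative; `make_status_file` is a hypothetical callable returning a file-like object)::
    fs.mountfile('/status.txt', make_status_file)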
"""
self.mount_tree[path] = MountFS.FileMount(path, open_callable, info_callable)
@synchronize
def unmount(self, path):
"""Unmounts a path.
:param path: Path to unmount
"""
del self.mount_tree[path]
@synchronize
def settimes(self, path, accessed_time=None, modified_time=None):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
raise UnsupportedError("settimes")
fs.settimes(delegate_path, accessed_time, modified_time)
@synchronize
def getinfo(self, path):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
if self.isfile(path):
return self.mount_tree[path].info_callable(path)
return {}
return fs.getinfo(delegate_path)
@synchronize
def getsize(self, path):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
object = self.mount_tree.get(path, None)
if object is None:
raise ResourceNotFoundError(path)
if not isinstance(object,MountFS.FileMount):
raise ResourceInvalidError(path)
size = object.info_callable(path).get("size", None)
return size
return fs.getinfo(delegate_path).get("size", None)
@synchronize
def getxattr(self,path,name,default=None):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return default
return fs.getxattr(delegate_path,name,default)
@synchronize
def setxattr(self,path,name,value):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
raise UnsupportedError("setxattr")
return fs.setxattr(delegate_path,name,value)
@synchronize
def delxattr(self,path,name):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return True
return fs.delxattr(delegate_path,name)
@synchronize
def listxattrs(self,path):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return []
return fs.listxattrs(delegate_path)
| |
import numpy as np
import math
from python_qt_binding.QtCore import Qt, QMetaType, QDataStream, QVariant, pyqtSignal
from python_qt_binding import loadUi
from rqt_gui_py.plugin import Plugin
from python_qt_binding.QtWidgets import QWidget, QTreeWidget, QTreeWidgetItem,QListWidgetItem, \
QSlider, QGroupBox, QVBoxLayout, QLabel, QLineEdit, QListWidget, QAbstractItemView, QFileDialog, QDoubleSpinBox, QMessageBox, \
QInputDialog, QShortcut
from python_qt_binding.QtGui import QDoubleValidator, QKeySequence, QPixmap, QTransform
import os
import rospkg, rospy
import time
import multiprocessing as mp
import yaml
from label_pool import BallPool, RobotPool, CrossPool, OpponentPool, UndefinedPool, TeammatePool, AngularLabel, Arrowlabel
from status_msg import StatusMsg
from trajectory_msg import TrajectoryMsg
from detection_msg import DetectionMsg
from position_msg import PositionMsg
from name import Name
class RobotInformation:
def __init__(self, robotID, frame, colorString):
self.id = robotID
self.frame = frame
self.color = colorString
self.rob_label = None # QLabel
self.pixmap = None
self.ang_label = None
self.arrow_label = None
self.x = 0
self.y = 0
self.angel = 0
self.team_color = ""
self.oppo_color = ""
self.currentBallLabel = []
self.currentObstacleLabel = []
self.currentTeammateLabel = []
self.currentOpponentsLabel = []
self.currentPathLabel = []
self.hide = False
self.teammateVisible = True
self.obstacleVisible = True
self.opponentVisible = True
self.ballVisible = True
self.pathVisible = True
class SingleField:
def __init__(self, gameFrame, fieldImageLabel):
self.frame = gameFrame
self.field = fieldImageLabel
rp = rospkg.RosPack()
ip_filename = os.path.join(rp.get_path('bitbots_live_tool_rqt'), 'resource', 'ip_config.yaml')
with open(ip_filename, "r") as file:
config = yaml.safe_load(file)
self.fieldfilename = config.get("FIELD_IMAGE")
self.field_scale_global = config.get("FIELD_SCALE")
self.default_positions = config.get("DEFAULT_POSITIONS")
print("Config: " + self.fieldfilename + ", " + str(self.field_scale_global))
self.fieldfilename = os.path.join(rp.get_path('bitbots_live_tool_rqt'), 'resource', self.fieldfilename)
self.fieldPixmap = QPixmap(self.fieldfilename)
self.field.setPixmap(self.fieldPixmap)
field_width = self.fieldPixmap.width()
field_height = self.fieldPixmap.height()
self.field_aspect = float(field_width) / float(field_height)
self.frame_aspect = float(self.frame.width()) / float(self.frame.height())
self.field_border = 40 # margin to frame
self.transform = QTransform()
self.fieldIsSwitched = False
self.icon_timeout = 3
self.team_colors = {3: "cyan", 2: "magenta"}
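# Aspect-fit the field image inside the frame, keeping field_border pixels of margin on the constrained axis.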
if self.field_aspect >= self.frame_aspect: # field image proportionally wider than the frame: width is the constrained axis
self.field_size_x = self.frame.width() - self.field_border*2
self.field_size_y = self.field_size_x / self.field_aspect
else:
self.field_size_y = self.frame.height() - self.field_border * 2
self.field_size_x = self.field_size_y * self.field_aspect
self.field.setScaledContents(True)
self.field.setFixedSize(self.field_size_x, self.field_size_y)
self.field_scale = float(self.field_size_x) / float(field_width)
#print(self.field_scale)
self.screenMidX = int(self.frame.width() / 2)
self.screenMidY = int(self.frame.height() / 2)
self.field.move(self.screenMidX - int(self.field.width() / 2), self.screenMidY - int(self.field.height()/ 2))
self.colors = ["red", "green", "yellow", "blue"]
self.robots = {} # dict where keys are robot IDs
self.tabToRobot = {} # dict where keys are tab-nrs [0-3]
self.ball_pool = BallPool(self.frame, size=42)
self.rob_pool = RobotPool(self.frame, size=48)
self.opp_pool = OpponentPool(self.frame, size=42)
self.team_pool = TeammatePool(self.frame, size=42)
self.cross_pool = CrossPool(self.frame, size=34)
self.undef_pool = UndefinedPool(self.frame, size=42)
# logic
def setSide(self, default):
self.fieldIsSwitched = not default
# Adds a new robot; a free color is assigned automatically unless one is given manually.
def addRobot(self, robotID, color=""):
if color == "" or color not in self.colors:
if len(self.colors) > 0:
color = self.colors.pop()
else:
color = "red"
rob_info = RobotInformation(robotID, self.frame, color)
rob_info.rob_label = self.rob_pool.getRobotLabel(color)
rob_info.pixmap = rob_info.rob_label.pixmap() # save original pixmap for rotation
rob_info.rob_label.update()
rob_info.ang_label = AngularLabel(self.frame)
rob_info.arrow_label = Arrowlabel(self.frame)
self.frame.update()
self.robots[robotID] = rob_info
self.tabToRobot[ len(self.tabToRobot) ] = rob_info
defpos = {"x": 0, "y": 0}
if len(self.default_positions) > 0:
defpos = self.default_positions.pop()
self.setRobotPosition(robotID, defpos.get("x"), defpos.get("y"), 0) # sets default position
"""
if self.robotsVisible:
rob_info.rob_label.show()
else:
rob_info.rob_label.hide()"""
rob_info.rob_label.show()
rob_info.ang_label.show()
rob_info.arrow_label.show()
print(self.tabToRobot)
print(self.robots)
self.upadteAllVisiblities(rob_info)
return rob_info
def _meter_to_UI(self, val):
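# Meters -> displayed pixels: divide by FIELD_SCALE (presumably meters per image pixel, from ip_config.yaml), then apply the widget's display scale.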
return (val / self.field_scale_global) * self.field_scale
# Sets the robot's position: x and y in meters, angle in radians.
def setRobotPosition(self, robotID, x, y, angle):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
if x is not None:
if self.fieldIsSwitched:
rob.x = -x
else:
rob.x = x
if y is not None:
if self.fieldIsSwitched:
rob.y = -y
else:
rob.y = y
if angle is not None:
if self.fieldIsSwitched:
rob.angel = -angle + self.degToRadians(180)
else:
rob.angel = -angle
transform = QTransform()
transform.rotateRadians(rob.angel)
rotPixMap = QPixmap(rob.rob_label.originalPixmap).transformed( transform )
rob.rob_label.setPixmap(rotPixMap)
rob.rob_label.setScaledContents(True)
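# A square of side s rotated by angle a needs a bounding box of s*(|sin a| + |cos a|), up to
# s*sqrt(2) at 45 degrees; |sin(2a)| * (sqrt(2) - 1) approximates that extra growth, peaking at 45 degrees.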
addScale = (abs(math.sin(rob.angel*2))) * (math.sqrt(2) - 1)
newsize = self.rob_pool.size + (self.rob_pool.size*addScale)
rob.rob_label.setFixedSize(newsize, newsize)
#print(addScale, newsize, rob.rob_label.width())
rob.rob_label.setScaledContents(True)
rob.rob_label.move(self.screenMidX - int(rob.rob_label.width() / 2) + self._meter_to_UI(rob.x), \
self.screenMidY - int(rob.rob_label.height() / 2) - self._meter_to_UI(rob.y))
rob.ang_label.move( rob.rob_label.x() + int(rob.rob_label.width() / 2) - rob.ang_label.width() / 2,\
rob.rob_label.y() + int(rob.rob_label.height() / 2) - rob.ang_label.height() / 2)
rob.arrow_label.move(rob.rob_label.x() + int(rob.rob_label.width() / 2) - rob.arrow_label.width() / 2,\
rob.rob_label.y() + int(rob.rob_label.height() / 2) - rob.arrow_label.height() / 2)
rob.arrow_label.setRoboAngle(self.radToDeg(rob.angel))
rob.ang_label.setAngles(self.radToDeg(-rob.angel), None)
#rob.rob_label.update()
#rob.rob_label.repaint()
def setBallsFor(self, robotID, listPositions, lastUpdate):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
time_secs = rospy.get_rostime().secs
for i in range(len(rob.currentBallLabel)):
self.ball_pool.returnBallLabel( rob.currentBallLabel.pop(), rob.color )
for pos in listPositions:
if time_secs - lastUpdate < self.icon_timeout:
bpx, bpy = self.relToAbs(rob.x, rob.y, rob.angel, pos[0], pos[1])
ball = self.ball_pool.getBallLabel( rob.color )
ball.move(self.screenMidX - int(ball.width() / 2) + self._meter_to_UI(bpx), \
self.screenMidY - int(ball.height() / 2) + self._meter_to_UI(bpy))
rob.currentBallLabel.append(ball)
self.upadteAllVisiblities(rob)
def setPathsFor(self, robotID, listPaths):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
for i in range(len(rob.currentPathLabel)):
self.cross_pool.returnCrossLabel( rob.currentPathLabel.pop(), rob.color )
for pos in listPaths:
cross = self.cross_pool.getCrossLabel( rob.color )
if self.fieldIsSwitched:
cross.move(self.screenMidX - int(cross.width() / 2) - self._meter_to_UI(pos[0]), \
self.screenMidY - int(cross.height() / 2) - self._meter_to_UI(pos[1]))
else:
cross.move(self.screenMidX - int(cross.width() / 2) + self._meter_to_UI(pos[0]), \
self.screenMidY - int(cross.height() / 2) + self._meter_to_UI(pos[1]))
cross.show()
rob.currentPathLabel.append(cross)
self.upadteAllVisiblities(rob)
def setTeammatesFor(self, robotID, listPositions, lastUpdate):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
time_secs = rospy.get_rostime().secs
for i in range(len(rob.currentTeammateLabel)):
self.team_pool.returnTeammateLabel( rob.currentTeammateLabel.pop(), rob.color )
for pos in listPositions:
if time_secs - lastUpdate < self.icon_timeout:
bpx, bpy = self.relToAbs(rob.x, rob.y, rob.angel, pos[0], pos[1])
teamm = self.team_pool.getTeammateLabel( rob.color )
teamm.move(self.screenMidX - int(teamm.width() / 2) + self._meter_to_UI(bpx), \
self.screenMidY - int(teamm.height() / 2) + self._meter_to_UI(bpy))
teamm.show()
rob.currentTeammateLabel.append(teamm)
self.upadteAllVisiblities(rob)
def setOpponentsFor(self, robotID, listPositions, lastUpdate):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
time_secs = rospy.get_rostime().secs
for i in range(len(rob.currentOpponentsLabel)):
self.opp_pool.returnOpponentLabel( rob.currentOpponentsLabel.pop(), rob.color )
for pos in listPositions:
if time_secs - lastUpdate < self.icon_timeout:
bpx, bpy = self.relToAbs(rob.x, rob.y, rob.angel, pos[0], pos[1])
opp = self.opp_pool.getOpponentLabel( rob.color )
opp.move(self.screenMidX - int(opp.width() / 2) + self._meter_to_UI(bpx), \
self.screenMidY - int(opp.height() / 2) + self._meter_to_UI(bpy))
opp.show()
rob.currentOpponentsLabel.append(opp)
self.upadteAllVisiblities(rob)
# obstacles are undefined
def setObstaclesFor(self, robotID, listObstacles, lastUpdate):
if self.robots.has_key(robotID):
rob = self.robots.get(robotID)
time_secs = rospy.get_rostime().secs
for i in range(len(rob.currentObstacleLabel)):
self.undef_pool.returnUndefLabel( rob.currentObstacleLabel.pop(), rob.color )
for pos in listObstacles:
if time_secs - lastUpdate < self.icon_timeout:
bpx, bpy = self.relToAbs(rob.x, rob.y, rob.angel, pos[0], pos[1])
obs = self.undef_pool.getUndefLabel( rob.color )
obs.move(self.screenMidX - int(obs.width() / 2) + self._meter_to_UI(bpx), \
self.screenMidY - int(obs.height() / 2) + self._meter_to_UI(bpy))
obs.show()
rob.currentObstacleLabel.append(obs)
self.upadteAllVisiblities(rob)
# Message decoding ==============================================================================
def setTrajectoryMsg(self, robotID, data):
if not self.robots.has_key(robotID):
self.addRobot(robotID)
# simple absolute target position with target angle
if data.has_key(TrajectoryMsg.label_moveToX):
self.setPathsFor(robotID, [(data.get(TrajectoryMsg.label_moveToX), data.get(TrajectoryMsg.label_moveToY), data.get(TrajectoryMsg.label_finalAngle))])
# Twist vectors: rotation
if data.has_key(TrajectoryMsg.label_rotateVel):
rob = self.robots.get(robotID)
rob.ang_label.setAngles(self.radToDeg(-rob.angel), self.radToDeg(data.get(TrajectoryMsg.label_rotateVel)))
# Twist vectors: linear velocity
if data.has_key(TrajectoryMsg.label_moveVelX):
rob = self.robots.get(robotID)
rob.arrow_label.setLinearAngle(self._meter_to_UI(data.get(TrajectoryMsg.label_moveVelX)), self._meter_to_UI(data.get(TrajectoryMsg.label_moveVelY)))
rob.arrow_label.move(rob.rob_label.x() + int(rob.rob_label.width() / 2) - rob.arrow_label.width() / 2, \
rob.rob_label.y() + int(rob.rob_label.height() / 2) - rob.arrow_label.height() / 2)
rob.arrow_label.setRoboAngle(self.radToDeg(rob.angel))
def setDetectionMsg(self, robotID, data):
if not self.robots.has_key(robotID):
self.addRobot(robotID)
rob = self.robots.get(robotID)
if data.has_key(DetectionMsg.label_ball_info):
self.setBallsFor(robotID, [(data.get(DetectionMsg.label_ball_info).get("x"), data.get(DetectionMsg.label_ball_info).get("y"))], \
data.get(DetectionMsg.label_ball_info).get(Name.last_update))
if data.has_key(DetectionMsg.label_obstacles):
lsUndef = []
lsTeammate = []
lsOpponent = []
for ob in data.get(DetectionMsg.label_obstacles):
ob_color = ob.get(DetectionMsg.label_obstacle_info).get(DetectionMsg.label_color)
if ob_color == rob.team_color: # teammate (+2 because the color constants differ)
lsTeammate.append([ob.get(DetectionMsg.label_obstacle_pos).get("x"), ob.get(DetectionMsg.label_obstacle_pos).get("y")])
elif ob_color == rob.oppo_color: # opponent
lsOpponent.append([ob.get(DetectionMsg.label_obstacle_pos).get("x"), ob.get(DetectionMsg.label_obstacle_pos).get("y")])
else: #undefined
lsUndef.append([ob.get(DetectionMsg.label_obstacle_pos).get("x"), ob.get(DetectionMsg.label_obstacle_pos).get("y")])
self.setObstaclesFor(robotID, lsUndef, data.get(DetectionMsg.label_last_obstacle_update))
self.setTeammatesFor(robotID, lsTeammate, data.get(DetectionMsg.label_last_obstacle_update))
self.setOpponentsFor(robotID, lsOpponent, data.get(DetectionMsg.label_last_obstacle_update))
def setPositionMsg(self, robotID, data):
if not self.robots.has_key(robotID):
self.addRobot(robotID)
print("add robot: " + robotID)
if(data.has_key(PositionMsg.label_pos)):
self.setRobotPosition(robotID, data.get(PositionMsg.label_pos).get("x"), data.get(PositionMsg.label_pos).get("y"),\
data.get(PositionMsg.label_orientation).get(PositionMsg.label_yaw))
def setStatusMsg(self, robotID, data):
if not self.robots.has_key(robotID):
self.addRobot(robotID)
print("add robot: " + robotID)
if data.has_key(StatusMsg.labelTeamColor):
rob = self.robots.get(robotID)
statusColor = data.get(StatusMsg.labelTeamColor)
statusColor = (1 - statusColor) + 2 # /gamestate and /obstacles have different constants for colors
oppoColor = data.get(StatusMsg.labelTeamColor) + 2
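# e.g. gamestate color 0 -> team color 3 (cyan), opponent color 2 (magenta); 1 -> team 2, opponent 3 (cf. self.team_colors)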
rob.team_color = statusColor
rob.oppo_color = oppoColor
#print(rob.team_color, rob.oppo_color)
# visibility ===================================================================================
# hides all of the tab's components if param hide is true
# num is the tab's number
#def hideAll(self, hide, num):
# Currently handled via information_tab instead.
def changeVisibilityFor(self, ls, visible):
for e in ls:
if visible:
e.show()
else:
e.hide()
def upadteAllVisiblities(self, rob):
self.changeVisibilityFor(rob.currentTeammateLabel, rob.teammateVisible)
self.changeVisibilityFor(rob.currentObstacleLabel, rob.obstacleVisible)
self.changeVisibilityFor(rob.currentOpponentsLabel, rob.opponentVisible)
self.changeVisibilityFor(rob.currentBallLabel, rob.ballVisible)
self.changeVisibilityFor(rob.currentPathLabel, rob.pathVisible)
def setOwnRobotsVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
if visible:
rob.rob_label.show()
rob.ang_label.show()
rob.arrow_label.show()
else:
rob.rob_label.hide()
rob.ang_label.hide()
rob.arrow_label.hide()
def setTeammateVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.teammateVisible = visible
self.upadteAllVisiblities(rob)
def setOpponentVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.opponentVisible = visible
self.upadteAllVisiblities(rob)
def setPathVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.pathVisible = visible
self.upadteAllVisiblities(rob)
"""
def setObstacleVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.obstacleVisible = visible
self.upadteAllVisiblities(rob)
"""
def setBallVisibility(self, visible, num):
#print(num)
#print(self.tabToRobot)
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.ballVisible = visible
#print("ball visible: " + str(rob.ballVisible))
self.upadteAllVisiblities(rob)
def setUndefVisibility(self, visible, num):
if self.tabToRobot.has_key(num):
rob = self.tabToRobot.get(num)
rob.obstacleVisible = visible
self.upadteAllVisiblities(rob)
# Helper ===================================================================
def vec_rotate(self, x, y, angle_rad):
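# Standard 2D rotation of (x, y) by angle_rad, counter-clockwise.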
x_new = x * math.cos(angle_rad) - y * math.sin(angle_rad)
y_new = y * math.cos(angle_rad) + x * math.sin(angle_rad)
return [x_new, y_new]
def relToAbs(self, fromx, fromy, fromAng, relx, rely):
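# Convert a point (relx, rely), given relative to the pose (fromx, fromy, fromAng), into absolute field coordinates.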
rx, ry = self.vec_rotate(relx, rely, fromAng)
return (fromx + rx, fromy + ry)
def radToDeg(self, rads):
return rads * 57.29578
def degToRadians(self, deg):
return deg / 57.29578
| |
from __future__ import division
from .base import BaseMode
from ..models import Meeting, Group, TalkProposal, ThunderdomeVotes
from copy import copy
from datetime import datetime
from random import randint
import re
class Mode(BaseMode):
"""A mdoer for handling Thunderdome sessions."""
def __init__(self, bot):
super(Mode, self).__init__(bot)
# variables that track the state of where we are right now
self.meeting = None
self.current_group = None
self.next_group = None
self.segment = None
self.unaddressed = 0
def chair_start(self, user, channel, meeting_num=None):
"""Begin a meeting. If a meeting number is given, then
resume that meeting. Initializes the next group."""
try:
self.next_group = Group.next_undecided_group()
except IndexError:
self.msg(channel, "There are no unreviewed groups remaining. "
"Clearly, we shouldn't be here.")
return
# pull up the meeting itself, or if no meeting number was specified,
# then create a new meeting record
if meeting_num:
try:
self.meeting = Meeting.objects.get(number=int(meeting_num))
action = 'resumed'
except Meeting.DoesNotExist:
self.msg(channel, 'There is no meeting in the system with that number.')
return
else:
self.meeting = Meeting.objects.create(start=datetime.now(), type="thunderdome")
action = 'started'
# announce that the meeting has begun
self.msg(channel, 'THIS. IS. THUNDERDOME!')
self.msg(channel, "And meeting #{number} has {action}. Let's do this thing!".format(number=self.meeting.number, action=action))
# tell the mode that the meeting has begun
self._in_meeting = True
# ask folks for their names iff this is a new meeting
if action == 'started':
self.names(channel)
def chair_next(self, user, channel):
"""Move us to the next group."""
# sanity check: we could be in the "post-report" stage; if we are
# then most likely the chair tried to move to the next group without
# addressing the one we were in -- refuse.
if self.segment == 'post-report':
self.msg(channel, 'We just had a report on the current group. I am cowardly refusing to move on to the next group until results from the current one have been addressed.')
return
# move us to the next group
if self.next_group:
self.current_group = self.next_group
try:
self.next_group = Group.next_undecided_group(after=self.current_group)
except IndexError:
self.next_group = None
else:
self.msg(channel, "There are no groups left for review. We're done!")
return
# print out the group we're now on, and the necessary information about it
self.msg(channel, '=== Thunderdome for "{0}" begins now! ==='.format(self.current_group.name))
self._report_on_group(channel, self.current_group)
self.msg(channel, ' * - * - * - * ')
# now calculate the period of silent time to give to review talks
# before shifting to the debate period
silent_minutes = max(len(self.current_group.talk_ids) * 0.5, 1.5)
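# e.g. one talk -> the 1.5-minute floor; four talks -> 2 minutes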
self.msg(channel, 'You now have {time} to review these talks and collect your thoughts prior to debate. Please refrain from speaking until debate begins.'.format(
time=self._minutes_to_text(silent_minutes),
))
# now begin the timer and count down the silent period
self.bot.set_timer(channel, silent_minutes * 60, callback=self.chair_debate, callback_kwargs={
'channel': channel,
'user': user,
})
self.segment = 'silent_review'
# set the state handler for the silent review period
self.bot.state_handler = self.handler_silent_review
def chair_debate(self, user, channel):
"""Shift the channel into debate mode. The time allotted for debate
should scale with the number of talks in the group."""
# determine the debate time; it should be a function of the number
# of talks in the group
debate_minutes = len(self.current_group.talk_ids) * 1.5
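# e.g. four talks -> 6 minutes of debate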
# announce that we're in debate now
self.msg(channel, '=== General Debate ({time}) for "{name}" ==='.format(
name=self.current_group.name,
time=self._minutes_to_text(debate_minutes),
))
# remove any state handler that is currently on the channel
self.bot.state_handler = None
# set the timer and status
self.bot.set_timer(channel, debate_minutes * 60)
self.segment = 'debate'
def chair_vote(self, user, channel):
"""Shift the channel into voting mode. Accept votes in
any reasonable / parsable format, and collect data for the
final report."""
# clear any existing timer on the bot
self.bot.clear_timer()
# announce that we're shifting into voting
self.msg(channel, '=== Voting time! ===')
self.msg(channel, 'Enter your vote in any form I understand (details: `/msg {nick} voting`). You may vote for as many talks as you like, but remember that we are limited to roughly 110 slots.'.format(
nick=self.bot.nickname,
))
# wipe out the current list of votes (from the last group)
# so that I can store the new list
self.current_votes = {}
self.bot.state_handler = self.handler_user_votes
def chair_extend(self, user, channel, extend_time=1):
"""Extend the time on the clock. In reality, this does nothing
but set another clock, but it's a useful management tool within meetings."""
# if there's an active timer, just delay it
if self.bot.timer and self.bot.timer.active():
self.bot.timer.delay(float(extend_time) * 60)
else:
# clear the timer and set a new one
self.bot.clear_timer()
self.bot.set_timer(channel, float(extend_time) * 60)
# now report the extension
self.msg(channel, '=== Extending time by %s. Please continue. ===' % self._minutes_to_text(extend_time))
def chair_report(self, user, channel):
"""Report the results of the vote that was just taken to the channel."""
# turn off any state handlers
self.bot.state_handler = None
# iterate over each talk in the group, and save its thunderdome
# results to the database
for talk in self.current_group.talks:
supporters = sum([(1 if talk.talk_id in vote else 0) for vote in self.current_votes.values()])
total_voters = len(self.current_votes)
# record the thunderdome votes for this talk
talk.thunderdome_votes = ThunderdomeVotes(
supporters=supporters,
attendees=total_voters,
)
talk.save()
# now get me a sorted list of talks, sorted by the total
# number of votes received (descending)
sorted_talks = sorted(self.current_group.talks, key=lambda t: t.thunderdome_votes, reverse=True)
# print out the talks to the channel, in order from
# those voted well to those voted poorly
for talk in sorted_talks:
self.msg(channel, '{status}: #{talk_id}: {talk_title} ({supporters}/{attendees}, {percent:.2f}%%)'.format(
attendees=talk.thunderdome_votes.attendees,
percent=talk.thunderdome_votes.percent,
status=talk.thunderdome_votes.vote_result.upper(),
supporters=talk.thunderdome_votes.supporters,
talk_id=talk.talk_id,
talk_title=talk.title,
)) # damn auto-% substitution in self.msg... :-/
# declare that we are in the post-report segment
self.segment = 'post-report'
self.unaddressed = len(self.current_group.talk_ids)
def chair_certify(self, user, channel):
"""Certify the results as just reported."""
# sanity check: are we in the post-report segment?
# if not, then this command doesn't make sense
if self.segment != 'post-report':
self.msg(channel, 'There are no results to certify.')
return
# iterate over the talks and record the results of the voting
accepted = []
damaged = []
rejected = []
for talk in self.current_group.talks:
result = talk.thunderdome_votes.vote_result
if result == 'accepted':
accepted.append(talk.talk_id)
elif result == 'damaged':
damaged.append(talk.talk_id)
elif result == 'rejected':
rejected.append(talk.talk_id)
# actually perform the accepting, damaging, and rejecting
# of the talks based on the votes
self.chair_accept(user, channel, *accepted)
self.chair_damage(user, channel, *damaged)
self.chair_reject(user, channel, *rejected)
def chair_accept(self, user, channel, *talk_ids):
"""Accept the talks provided as arguments."""
self._make_decision(user, channel, 'accepted', *talk_ids)
def chair_reject(self, user, channel, *talk_ids):
"""Reject the talks provided as arguments."""
self._make_decision(user, channel, 'rejected', *talk_ids)
def chair_damage(self, user, channel, *talk_ids):
"""Damage the talks provided as arguments."""
self._make_decision(user, channel, 'damaged', *talk_ids)
def chair_suggest(self, user, channel, talk_alternative, *talk_ids):
"""Set the given talk alternative (poster, open space, etc.) on
the given talk. This does *not* change its main status, since it
may be either damaged or rejected."""
# make sure that each talk ID I was given is a damaged
# or rejected talk in this group; if not, complain loudly and quit
not_found = []
wrong_status = []
talk_objects = []
for talk_id in talk_ids:
try:
talk = self.current_group.talk_by_id(talk_id)
if talk.status not in ('damaged', 'rejected'):
wrong_status.append(talk_id)
else:
talk_objects.append(talk)
except ValueError:
not_found.append(talk_id)
# print out any errata
if len(not_found):
self.msg(channel, 'The following talk{plural} {verb} not part of the current group: {missing}.'.format(
missing=', '.join([str(i) for i in not_found]),
plural='s' if len(not_found) != 1 else '',
verb='is' if len(not_found) == 1 else 'are',
))
if len(wrong_status):
self.msg(channel, 'The following talk{plural} {verb} a status that is not "damaged" or "rejected": {wrong_status}. Please damage or reject {pronoun} before giving {pronoun} a suggested talk alternative.'.format(
plural='s' if len(wrong_status) != 1 else '',
pronoun='it' if len(wrong_status) == 1 else 'them',
verb='has' if len(wrong_status) == 1 else 'have',
wrong_status=', '.join([str(i) for i in wrong_status]),
))
# if there were any errata, hard stop
if len(not_found) or len(wrong_status):
self.msg(channel, 'Since I cannot process all of the given input, I am cowardly refusing to do anything. Please try again.')
return
# sanity check: is this an alternative I understand?
if talk_alternative not in [i[0] for i in TalkProposal.TALK_ALTERNATIVES]:
self.msg(channel, 'I do not recognize the talk alternative "{0}". Sorry.'.format(talk_alternative))
return
# okay, apply the alternative status to every requested talk
for talk in talk_objects:
talk.alternative = talk_alternative
talk.save()
self.msg(channel, '=== Suggested {alternative} for talk{plural} {talks}. ==='.format(
alternative=talk_alternative.replace('_', ' '),
plural='s' if len(talk_objects) != 1 else '',
talks=', '.join(talk_ids)
))
def _make_decision(self, user, channel, decision, *talk_ids):
# sanity check: if there is an empty list of talk ids
# (which could happen, since `chair_certify` doesn't check
# for a non-zero list), then simply do nothing
if not talk_ids:
return
# iterate over each provided talk id, get the talk from
# the group's list of talks, and make the decision on the talk
talks = []
errors = []
for talk_id in talk_ids:
try:
talk_id = int(talk_id)
talk = self.current_group.talk_by_id(talk_id)
talks.append(talk)
except ValueError:
errors.append(talk_id)
# if there were errors on any of the talk ids given,
# then error out now
if errors:
self.msg(channel, 'The following talk{plural} {verb} not part of the active group and could not be {decision}: {badness}'.format(
badness=', '.join([str(i) for i in errors]),
decision=decision,
plural='s' if len(errors) != 1 else '',
verb='is' if len(errors) == 1 else 'are',
))
self.msg(channel, 'As some of the input is in error, and because I am a very picky robot, I am cowardly refusing to do anything.')
return
# actually make the decision on the given talks
for talk in talks:
talk.thunderdome_result = decision
talk.status = decision
talk.save()
# Record the talk to the list of talks decided in this meeting
if self.meeting:
self.meeting.update(add_to_set__talks_decided=talk)
# report success to the channel
self.msg(channel, '=== Talk{plural} {decision}: {talk_ids} ==='.format(
decision=decision.capitalize(),
plural='s' if len(talks) != 1 else '',
talk_ids=', '.join([str(i.talk_id) for i in talks]),
))
# if we don't have any more unaddressed talks, nix the segment and
# mark the group as "done"
if not self.current_group.undecided_talks:
self.segment = None
self.current_group.decided = True
self.current_group.save()
def _report_on_group(self, dest_output, group):
"""Report on the contents of a group to the given user or channel."""
for talk in group.talks:
self.msg(dest_output, '#{id}: {title} ({url})'.format(
id=talk.talk_id,
title=talk.title,
url=talk.review_url,
))
def chair_end(self, user, channel):
"""Conclude the meeting."""
self.msg(channel, "=== Th-th-th-that's all folks! ===")
# remove any state handler that may be present
self.bot.state_handler = None
# end the meeting
if self.meeting:
self.meeting.end = datetime.now()
self.meeting.save()
self.meeting = None
self._in_meeting = False
# pull out of this mode; ,end implies a reversion to skeleton mode
self.chair_mode(user, channel, 'none', _silent=True)
def private_current(self, user):
"""Spit out information about the current group."""
# sanity check: is there a current group?
if not self.current_group:
self.msg(user, 'There is no current group being discussed.')
return
# report on the current group
self.msg(user, 'The current group on the plate is: {0}'.format(self.current_group.name))
self._report_on_group(user, self.current_group)
def private_next(self, user):
"""Spit out information about the next group."""
# sanity check: is there an upcoming group?
if not self.next_group:
self.msg(user, 'There is no next group to be discussed.')
return
# report on the next group
self.msg(user, 'The next group on the plate is: {0}'.format(self.next_group.name))
self._report_on_group(user, self.next_group)
def private_voting(self, user):
"""Explain how voting is done."""
# if there is a current group, use examples from that group
examples = [92, 418]
if self.current_group:
examples = list(self.current_group.talk_ids)[0:2]
while len(examples) < 2:
examples.append(randint(1, 100)) # just in case, mirroring handler_user_votes
# explain what voting paradigms I understand
self.msg(user, 'I understand two voting paradigms:')
self.msg(user, '1. An absolute list of talks (e.g. `{0}, {1}`)'.format(*examples))
self.msg(user, '2. Two special keywords ("all", "none"), and the addition/removal of talks from those keywords or from your prior vote (e.g. `all -{1}` or `+{0}`).'.format(*examples))
def handler_silent_review(self, user, channel, message):
"""If a user speaks, tell them to be quiet, because it's the
silent review period."""
# tell the user to be quiet
self.msg(channel, '{user}: We are currently in the silent review period. Please be quiet.'.format(user=user))
def handler_user_votes(self, user, channel, message):
"""Record a user's vote."""
# parse out the vote into individual tokens, separated by commas,
# spaces, or both -- make this into a purely comma-separated vote
message = re.sub(r'\s+', ' ', message)
message = message.replace(', ', ',').replace(' ', ',')
vote = message.split(',')
# start from a copy of the user's former vote, if any
# we will modify `answer` instead of writing directly to self.current_votes,
# so that if there's an error part-way through, we never save half a vote
answer = set()
if user in self.current_votes:
    answer = copy(self.current_votes[user])
# ensure that every sub-piece of this vote is individually valid
# I currently understand:
# - integers on the talk_id list, optionally prefixed with [+-]
# - string "all"
# - string "none"
invalid_pieces = []
invalid_talk_ids = []
for piece in vote:
# I understand integers if they are on the talk_id list,
# including if they are prefixed with [+-]
if re.match(r'^[+-]?[\d]+$', piece):
talk_id = int(piece.replace('-', '').replace('+', ''))
if talk_id not in self.current_group.talk_ids:
invalid_talk_ids.append(talk_id)
continue
# I understand "all" and "none"
if piece == 'all' or piece == 'none':
continue
# I have no idea what this is
invalid_pieces.append(piece)
# sanity check: if I have any invalid tokens or talk_ids that aren't
# in the talk_id list, fail out now
if len(invalid_pieces) or len(invalid_talk_ids):
if len(invalid_pieces):
self.msg(channel, '{user}: I do not understand {tokens}.'.format(
user=user,
tokens=self._english_list(['"{0}"'.format(i) for i in invalid_pieces], conjunction='or'),
))
if len(invalid_talk_ids):
self.msg(channel, '{user}: You voted for {talks}, which {to_be_verb} not part of this group. Your vote has not been recorded.'.format(
talks=self._english_list(['#{0}'.format(i) for i in invalid_talk_ids]),
to_be_verb='is' if len(invalid_talk_ids) == 1 else 'are',
user=user,
))
return
# the simple case is that this is a "plain" vote -- a list of
# integers with no specials (e.g. "none") and no modifiers (+/-)
# this is straightforward: the vote becomes, in its entirety, the
# user's vote, and anything previously recorded for the user is
# simply dropped
if all(re.match(r'^[\d]+$', i) for i in vote):
self.current_votes[user] = set([int(i) for i in vote])
return
# sanity check: non-plain votes should not have *any* plain elements;
# therefore, if there are any, we should error out now
if any(re.match(r'^[\d]+$', i) for i in vote):
# use examples from the actual group to minimize confusion
examples = list(self.current_group.talk_ids)[0:2]
while len(examples) < 2:
examples.append(randint(1, 100)) # just in case
# spit out the error -- since this is long, send as much of it as possible to PMs
self.msg(channel, '{0}: I cannot process this vote. See your private messages for details.'.format(user))
self.msg(user, 'I cannot process this vote. I understand two voting paradigms:')
self.msg(user, '1. An absolute list of talks (e.g. `{0}, {1}`)'.format(*examples))
self.msg(user, '2. Two special keywords ("all", "none"), and the addition/removal of talks from those keywords or from your prior vote (e.g. `all -{1}` or `+{0}`).'.format(*examples))
self.msg(user, 'Your vote mixes these two paradigms together, and I don\'t know how to process that, so I am cowardly giving up.')
return
# sanity check: exclusive modifier votes only make sense if either
# 1. "all" or "none" is included in the vote -or-
# 2. the user has voted already
# if neither of these cases obtains, error out
if vote[0] not in ('all', 'none') and user not in self.current_votes:
self.msg(channel, '{0}: You can only modify your prior vote if you have already voted; you have not.'.format(user))
return
# sanity check (last one, for now): "all" or "none" only make sense at the
# *beginning* of a vote; don't take them at the end
if 'all' in vote[1:] or 'none' in vote[1:]:
self.msg(channel, '{0}: If using "all" or "none" in a complex vote, please use them exclusively at the beginning.'.format(user))
return
# okay, this is a valid vote with modifiers; parse it from left to right
# and process each of the modifiers
for piece in vote:
# first, is this "all" or "none"? these are the simplest
# cases -- either a full set or no set
if piece == 'all':
answer = copy(self.current_group.talk_ids)
if piece == 'none':
answer = set()
# add or remove votes with operators from the set
if piece.startswith('+'):
talk_id = int(piece[1:])
answer.add(talk_id)
if piece.startswith('-'):
    talk_id = int(piece[1:])
    # discard() rather than remove(): the talk may be absent from the
    # working set (e.g. `none, -92`), and that should not raise KeyError
    answer.discard(talk_id)
# okay, we processed a valid vote without error; set it
self.current_votes[user] = answer
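# A minimal standalone sketch of the modifier resolution above, assuming a
# hypothetical group with talk ids {92, 418} (helper name illustrative):
#
#   def resolve_vote(pieces, group_ids, prior=None):
#       answer = set(prior or ())
#       for piece in pieces:
#           if piece == 'all':
#               answer = set(group_ids)
#           elif piece == 'none':
#               answer = set()
#           elif piece.startswith('+'):
#               answer.add(int(piece[1:]))
#           elif piece.startswith('-'):
#               answer.discard(int(piece[1:]))
#       return answer
#
#   resolve_vote(['all', '-418'], {92, 418})       # -> {92}
#   resolve_vote(['+92'], {92, 418}, prior={418})  # -> {92, 418}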
def event_user_joined(self, user, channel):
"""React to a user's joining the channel when a meeting is
already in progress."""
# sanity check: if we're not in a meeting, then no need
# to do anything at all
if not self._in_meeting:
return
# sanity check: if the user is already in the non-voter list,
# then this is a red herring; ignore it
if user in self.nonvoters:
return
# spit out a welcome, and a request for a name, to the meeting channel,
# but tailor the request to where we are
if self.segment == 'silent_review':
self.msg(channel, 'Howdy %s. Right now we are in the %s segment on talk #%d. Please print your name for the record, but wait until this segment concludes.' % (user, self.segment.replace('_', ' '), self.current.talk_id))
else:
self.msg(channel, 'Howdy %s; name for the record, please?' % user)
# also, send the user a welcome with information about
# where we are and what's going on
self.msg(user, 'Thanks for coming, %s! This meeting has already begun.' % user)
if self.current_group:
self.private_current(user)
else:
self.msg(user, 'There is no current talk under consideration at this moment.')
# now give a quick overview of bot abilities
self.msg(user, 'You may issue me commands via private message if you like. Issue `help` at any time for a list.')
def log_message(self, user, channel, message):
"""Save the existing message to all appropriate transcripts."""
# if there is an active meeting, save this to the meeting transcript
if self.meeting:
self.meeting.add_to_transcript(datetime.now(), user, message)
# if there is a current group, save this to the transcript for each
# talk proposal within the group
if self.current_group:
self.current_group.add_to_transcript(datetime.now(), user, message)
for talk_id in self.current_group.talk_ids:
self.bot.log_target.log(talk_id, user, message)
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import abc
import errno
import os
import signal
import sys
import time
import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
import functools
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from paste.deploy import loadwsgi
from routes import middleware
import webob.dec
import webob.exc
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common.i18n import _
from heat.common import serializers
LOG = logging.getLogger(__name__)
URL_LENGTH_LIMIT = 50000
api_opts = [
cfg.IPOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.PortOpt('bind_port', default=8004,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', min=0, default=0,
           help=_("Number of workers for Heat service. "
                  "The default value 0 means the service will start "
                  "as many workers as the server has cores."),
           deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.')),
]
api_group = cfg.OptGroup('heat_api')
cfg.CONF.register_group(api_group)
cfg.CONF.register_opts(api_opts,
group=api_group)
api_cfn_opts = [
cfg.IPOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.PortOpt('bind_port', default=8000,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', min=0, default=1,
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.')),
]
api_cfn_group = cfg.OptGroup('heat_api_cfn')
cfg.CONF.register_group(api_cfn_group)
cfg.CONF.register_opts(api_cfn_opts,
group=api_cfn_group)
api_cw_opts = [
cfg.IPOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.PortOpt('bind_port', default=8003,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.IntOpt('workers', min=0, default=1,
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).'),
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0'),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.'),
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch API has been removed.',
deprecated_since='10.0.0')
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
cfg.CONF.register_opts(api_cw_opts,
group=api_cw_group)
wsgi_elt_opts = [
cfg.BoolOpt('wsgi_keep_alive',
default=True,
help=_("If False, closes the client socket connection "
"explicitly.")),
cfg.IntOpt('client_socket_timeout', default=900,
help=_("Timeout for client connections' socket operations. "
"If an incoming connection is idle for this number of "
"seconds it will be closed. A value of '0' means "
"wait forever.")),
]
wsgi_elt_group = cfg.OptGroup('eventlet_opts')
cfg.CONF.register_group(wsgi_elt_group)
cfg.CONF.register_opts(wsgi_elt_opts,
group=wsgi_elt_group)
json_size_opt = cfg.IntOpt('max_json_body_size',
default=1048576,
help=_('Maximum raw byte size of JSON request body.'
' Should be larger than max_template_size.'))
cfg.CONF.register_opt(json_size_opt)
def list_opts():
yield None, [json_size_opt]
yield 'heat_api', api_opts
yield 'heat_api_cfn', api_cfn_opts
yield 'heat_api_cloudwatch', api_cw_opts
yield 'eventlet_opts', wsgi_elt_opts
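# list_opts() follows the oslo.config option-discovery convention: tools such
# as oslo-config-generator call it (via an entry point) to enumerate every
# option this module registers. Illustrative use:
#
#   for group, opts in list_opts():
#       print(group, [opt.name for opt in opts])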
def get_bind_addr(conf, default_port=None):
"""Return the host and port to bind to."""
return (conf.bind_host, conf.bind_port or default_port)
def get_socket(conf, default_port):
"""Bind socket to bind ip:port in conf.
Note: Mostly comes from Swift with a few small changes...
:param conf: a cfg.ConfigOpts object
:param default_port: port to bind to if none is specified in conf
:returns: a socket object as returned from eventlet.listen;
          SSL wrapping, when configured, happens later in
          Server.configure_socket
"""
bind_addr = get_bind_addr(conf, default_port)
# TODO(jaypipes): eventlet's greened socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
cert_file = conf.cert_file
key_file = conf.key_file
use_ssl = cert_file or key_file
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
sock = None
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=conf.backlog,
family=address_family)
except socket.error as err:
if err.errno != errno.EADDRINUSE:
raise
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(bind_addr)s "
"after trying for 30 seconds")
% {'bind_addr': bind_addr})
return sock
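# Illustrative use, assuming the heat_api option group registered above:
#
#   sock = get_socket(cfg.CONF.heat_api, default_port=8004)
#
# The retry loop gives a restarted server up to 30 seconds to win the race
# against a previous process that is still releasing the port (EADDRINUSE).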
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, conf, threads=1000):
os.umask(0o27) # ensure files are created with the correct privileges
self._logger = logging.getLogger("eventlet.wsgi.server")
self.name = name
self.threads = threads
self.children = set()
self.stale_children = set()
self.running = True
self.pgid = os.getpid()
self.conf = conf
try:
os.setpgid(self.pgid, self.pgid)
except OSError:
self.pgid = 0
def kill_children(self, *args):
"""Kills the entire process group."""
LOG.error('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False
os.killpg(0, signal.SIGTERM)
def hup(self, *args):
"""Reloads configuration files with zero down time."""
LOG.error('SIGHUP received')
signal.signal(signal.SIGHUP, signal.SIG_IGN)
raise exception.SIGHUPInterrupt
def start(self, application, default_port):
"""Run a WSGI server with the given application.
:param application: The application to run in the WSGI server
:param default_port: Port to bind to if none is specified in conf
"""
eventlet.wsgi.MAX_HEADER_LINE = self.conf.max_header_line
self.application = application
self.default_port = default_port
self.configure_socket()
self.start_wsgi()
def start_wsgi(self):
    workers = self.conf.workers
    # workers == 0: one child per core
    if workers == 0:
        childs_num = processutils.get_worker_count()
    # workers == 1: run a single GreenPool in-process, without children
    elif workers == 1:
        # Useful for profiling, testing, debugging etc.
        self.pool = eventlet.GreenPool(size=self.threads)
        self.pool.spawn_n(self._single_run, self.application, self.sock)
        return
    # otherwise: fork exactly the requested number of children
    else:
        childs_num = workers
    LOG.info("Starting %d workers", childs_num)
signal.signal(signal.SIGTERM, self.kill_children)
signal.signal(signal.SIGINT, self.kill_children)
signal.signal(signal.SIGHUP, self.hup)
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
while len(self.children) < childs_num:
self.run_child()
def wait_on_children(self):
while self.running:
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self._remove_children(pid)
self._verify_and_respawn_children(pid, status)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
LOG.info('Caught keyboard interrupt. Exiting.')
os.killpg(0, signal.SIGTERM)
break
except exception.SIGHUPInterrupt:
self.reload()
continue
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
LOG.debug('Exited')
def configure_socket(self, old_conf=None, has_changed=None):
"""Ensure a socket exists and is appropriately configured.
This function is called on start up, and can also be
called in the event of a configuration reload.
When called for the first time, a new socket is created.
If reloading and either bind_host or bind_port has been
changed, the existing socket must be closed and a new
socket opened (laws of physics).
In all other cases (bind_host/bind_port have not changed)
the existing socket is reused.
:param old_conf: Cached old configuration settings (if any)
:param has_changed: callable to determine if a parameter has changed
"""
# Do we need a fresh socket?
new_sock = (old_conf is None or (
has_changed('bind_host') or
has_changed('bind_port')))
# Will we be using https?
use_ssl = bool(self.conf.cert_file and self.conf.key_file)
# Were we using https before?
old_use_ssl = bool(old_conf is not None and
                   old_conf.get('key_file') and
                   old_conf.get('cert_file'))
# Do we now need to perform an SSL wrap on the socket?
wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
# Do we now need to perform an SSL unwrap on the socket?
unwrap_sock = use_ssl is False and old_use_ssl is True
if new_sock:
self._sock = None
if old_conf is not None:
self.sock.close()
_sock = get_socket(self.conf, self.default_port)
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
self._sock = _sock
if wrap_sock:
self.sock = ssl.wrap_socket(self._sock,
certfile=self.conf.cert_file,
keyfile=self.conf.key_file)
if unwrap_sock:
self.sock = self._sock
if new_sock and not use_ssl:
self.sock = self._sock
# Pick up newly deployed certs
if old_conf is not None and use_ssl is True and old_use_ssl is True:
if has_changed('cert_file'):
self.sock.certfile = self.conf.cert_file
if has_changed('key_file'):
self.sock.keyfile = self.conf.key_file
if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self.conf.tcp_keepidle)
if old_conf is not None and has_changed('backlog'):
self.sock.listen(self.conf.backlog)
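# A summary of the reload matrix implemented above:
#
#   bind_host/bind_port changed -> new socket (re-wrapped if SSL is on)
#   plain -> SSL                -> ssl.wrap_socket() the existing socket
#   SSL -> plain                -> fall back to the unwrapped socket
#   cert/key path changed       -> point the wrapped socket at new files
#   tcp_keepidle changed        -> setsockopt() on the live socket
#   backlog changed             -> listen() again with the new backlog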
def _remove_children(self, pid):
if pid in self.children:
self.children.remove(pid)
LOG.info('Removed dead child %s', pid)
elif pid in self.stale_children:
self.stale_children.remove(pid)
LOG.info('Removed stale child %s', pid)
else:
LOG.warning('Unrecognised child %s', pid)
def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0:
LOG.debug('No stale children')
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
LOG.error('Not respawning child %d, cannot '
'recover from termination', pid)
if not self.children and not self.stale_children:
LOG.info(
'All workers have terminated. Exiting')
self.running = False
else:
if len(self.children) < self.conf.workers:
self.run_child()
def stash_conf_values(self):
"""Make a copy of some of the current global CONF's settings.
Allows determining if any of these values have changed when the config
is reloaded.
"""
conf = {}
conf['bind_host'] = self.conf.bind_host
conf['bind_port'] = self.conf.bind_port
conf['backlog'] = self.conf.backlog
conf['key_file'] = self.conf.key_file
conf['cert_file'] = self.conf.cert_file
return conf
def reload(self):
"""Reload and re-apply configuration settings.
Existing child processes are sent a SIGHUP signal
and will exit after completing existing requests.
New child processes, which will have the updated
configuration, are spawned. This avoids any
interruption to the service.
"""
def _has_changed(old, new, param):
old = old.get(param)
new = getattr(new, param)
return (new != old)
old_conf = self.stash_conf_values()
has_changed = functools.partial(_has_changed, old_conf, self.conf)
cfg.CONF.reload_config_files()
os.killpg(self.pgid, signal.SIGHUP)
self.stale_children = self.children
self.children = set()
# Ensure any logging config changes are picked up
logging.setup(cfg.CONF, self.name)
self.configure_socket(old_conf, has_changed)
self.start_wsgi()
def wait(self):
"""Wait until all servers have completed running."""
try:
if self.children:
self.wait_on_children()
else:
self.pool.waitall()
except KeyboardInterrupt:
pass
def run_child(self):
def child_hup(*args):
"""Shuts down child processes, existing requests are handled."""
signal.signal(signal.SIGHUP, signal.SIG_IGN)
eventlet.wsgi.is_accepting = False
self.sock.close()
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, child_hup)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
# ignore the interrupt signal to avoid a race whereby
# a child worker receives the signal before the parent
# and is respawned unnecessarily as a result
signal.signal(signal.SIGINT, signal.SIG_IGN)
# The child has no need to stash the unwrapped
# socket, and the reference prevents a clean
# exit on sighup
self._sock = None
self.run_server()
LOG.info('Child %d exiting normally', os.getpid())
# self.pool.waitall() is now called in wsgi's server so
# it's safe to exit here
sys.exit(0)
else:
LOG.info('Started child %s', pid)
self.children.add(pid)
def _pipe_watcher(self):
def _on_timeout_exit(*args):
LOG.info('Graceful shutdown timeout exceeded, '
         'exiting immediately')
os._exit(1)
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read(1)
LOG.info('Parent process has died unexpectedly, exiting')
# allow up to 1 second for sys.exit to gracefully shutdown
signal.signal(signal.SIGALRM, _on_timeout_exit)
signal.alarm(1)
# do the same as child_hup
eventlet.wsgi.is_accepting = False
self.sock.close()
sys.exit(1)
def run_server(self):
"""Run a WSGI server."""
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
eventlet.hubs.use_hub('poll')
eventlet.patcher.monkey_patch(all=False, socket=True)
self.pool = eventlet.GreenPool(size=self.threads)
socket_timeout = cfg.CONF.eventlet_opts.client_socket_timeout or None
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
try:
eventlet.wsgi.server(
self.sock,
self.application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=self._logger,
debug=cfg.CONF.debug,
keepalive=cfg.CONF.eventlet_opts.wsgi_keep_alive,
socket_timeout=socket_timeout)
except socket.error as err:
if err.errno != errno.EINVAL:
raise
self.pool.waitall()
def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread."""
LOG.info("Starting single process server")
eventlet.wsgi.server(sock, application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=self._logger,
debug=cfg.CONF.debug)
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes require an application to be initialized that will be called
next. By default the middleware will simply call its wrapped app, or you
can override __call__ to customize its behavior.
"""
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
class Debug(Middleware):
"""Helper class to get information about the request and response.
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print('')
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in resp.headers.items():
print(key, "=", value)
print('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Prints the contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print('')
def debug_filter(app, conf, **local_conf):
return Debug(app)
class DefaultMethodController(object):
"""Controller that handles the OPTIONS request method.
This controller handles the OPTIONS request method and any of the HTTP
methods that are not explicitly implemented by the application.
"""
def options(self, req, allowed_methods, *args, **kwargs):
"""Return a response that includes the 'Allow' header.
Return a response that includes the 'Allow' header listing the methods
that are implemented. A 204 status code is used for this response.
"""
raise webob.exc.HTTPNoContent(headers=[('Allow', allowed_methods)])
def reject(self, req, allowed_methods, *args, **kwargs):
"""Return a 405 method not allowed error.
As a convenience, the 'Allow' header with the list of implemented
methods is included in the response as well.
"""
raise webob.exc.HTTPMethodNotAllowed(
headers=[('Allow', allowed_methods)])
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("server", "servers", controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
"""
self.map = mapper
self._router = middleware.RoutesMiddleware(self._dispatch, self.map)
@webob.dec.wsgify
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
"""Returns controller after matching the incoming request to a route.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404 or the
routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
"""Determine the requested response content-type."""
supported = ('application/json',)
bm = self.accept.best_match(supported)
return bm or 'application/json'
def get_content_type(self, allowed_content_types):
"""Determine content type of the request body."""
if "Content-Type" not in self.headers:
raise exception.InvalidContentType(content_type=None)
content_type = self.content_type
if content_type not in allowed_content_types:
raise exception.InvalidContentType(content_type=content_type)
else:
return content_type
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = i18n.get_available_languages('heat')
return self.accept_language.best_match(all_languages)
def is_json_content_type(request):
if request.method == 'GET':
try:
aws_content_type = request.params.get("ContentType")
except Exception:
aws_content_type = None
# respect aws_content_type when both available
content_type = aws_content_type or request.content_type
else:
content_type = request.content_type
# bug #1887882
# for back compatible for null or plain content type
if not content_type or content_type.startswith('text/plain'):
content_type = 'application/json'
if (content_type in ('JSON', 'application/json')
and request.body.startswith(b'{')):
return True
return False
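# The GET branch above exists because the AWS-style APIs signal the desired
# format with a ?ContentType=JSON query parameter rather than a header.
# Illustrative check using webob's standard test helper (values made up):
#
#   req = webob.Request.blank('/', POST='{"stack_name": "demo"}')
#   req.content_type = 'application/json'
#   is_json_content_type(req)   # -> True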
class JSONRequestDeserializer(object):
def has_body(self, request):
"""Returns whether a Webob.Request object will possess an entity body.
:param request: Webob.Request object
"""
if (int(request.content_length or 0) > 0 and
is_json_content_type(request)):
return True
return False
def from_json(self, datastring):
try:
if len(datastring) > cfg.CONF.max_json_body_size:
msg = _('JSON body size (%(len)s bytes) exceeds maximum '
'allowed size (%(limit)s bytes).'
) % {'len': len(datastring),
'limit': cfg.CONF.max_json_body_size}
raise exception.RequestLimitExceeded(message=msg)
return jsonutils.loads(datastring)
except ValueError as ex:
raise webob.exc.HTTPBadRequest(str(ex))
def default(self, request):
if self.has_body(request):
return {'body': self.from_json(request.body)}
else:
return {}
class Resource(object):
"""WSGI app that handles (de)serialization and controller dispatch.
Reads routing information supplied by RoutesMiddleware and calls
the requested action method upon its deserializer, controller,
and serializer. Those three objects may implement any of the basic
controller action methods (create, update, show, index, delete)
along with any that may be specified in the api router. A 'default'
method may also be implemented to be used in place of any
non-implemented actions. Deserializer methods must accept a request
argument and return a dictionary. Controller methods must accept a
request argument. Additionally, they must also accept keyword
arguments that represent the keys returned by the Deserializer. They
may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
"""
def __init__(self, controller, deserializer, serializer=None):
"""Initialisation of the WSGI app.
:param controller: object that implements the methods mapped by the routes lib
:param deserializer: object that supports webob request deserialization
through controller-like actions
:param serializer: object that supports webob response serialization
through controller-like actions
"""
self.controller = controller
self.deserializer = deserializer
self.serializer = serializer
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# From reading the boto code, and observation of real AWS API responses,
# it seems that the AWS API ignores the Content-Type HTTP header and
# instead looks at a "ContentType" GET query parameter
# This doesn't seem to be documented in the AWS cfn API spec, but it
# would appear that the default response serialization is XML, as
# described in the API docs, but passing a query parameter of
# ContentType=JSON results in a JSON serialized response...
content_type = request.params.get("ContentType")
LOG.info("Processing request: %(method)s %(path)s",
{'method': request.method, 'path': request.path})
try:
deserialized_request = self.dispatch(self.deserializer,
action, request)
action_args.update(deserialized_request)
LOG.debug(('Calling %(controller)s.%(action)s'),
{'controller': type(self.controller).__name__,
'action': action})
action_result = self.dispatch(self.controller, action,
request, **action_args)
except TypeError as err:
LOG.error('Exception handling resource: %s', err)
msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg)
http_exc = translate_exception(err, request.best_match_language())
# NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
# treated by wsgi as responses ready to be sent back and they
# won't make it into the pipeline app that serializes errors
raise exception.HTTPExceptionDisguise(http_exc)
except webob.exc.HTTPException as err:
if isinstance(err, aws_exception.HeatAPIException):
# The AWS compatible API's don't use faultwrap, so
# we want to detect the HeatAPIException subclasses
# and raise rather than wrapping in HTTPExceptionDisguise
raise
if not isinstance(err, webob.exc.HTTPError):
# Some HTTPException are actually not errors, they are
# responses ready to be sent back to the users, so we don't
# error log, disguise or translate those
raise
if isinstance(err, webob.exc.HTTPServerError):
LOG.error(
"Returning %(code)s to user: %(explanation)s",
{'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
except exception.HeatException as err:
raise translate_exception(err, request.best_match_language())
except Exception as err:
log_exception(err, sys.exc_info())
raise translate_exception(err, request.best_match_language())
# Here we support either passing in a serializer or detecting it
# based on the content type.
try:
serializer = self.serializer
if serializer is None:
if content_type == "JSON":
serializer = serializers.JSONResponseSerializer()
else:
serializer = serializers.XMLResponseSerializer()
response = webob.Response(request=request)
self.dispatch(serializer, action, response, action_result)
return response
# return unserializable result (typically an exception)
except Exception:
# Here we should get API exceptions derived from HeatAPIException
# these implement get_unserialized_body(), which allow us to get
# a dict containing the unserialized error response.
# We only need to serialize for JSON content_type, as the
# exception body is pre-serialized to the default XML in the
# HeatAPIException constructor
# If we get something else here (e.g a webob.exc exception),
# this will fail, and we just return it without serializing,
# which will not conform to the expected AWS error response format
if content_type == "JSON":
try:
err_body = action_result.get_unserialized_body()
serializer.default(action_result, err_body)
except Exception:
LOG.warning("Unable to serialize exception response")
return action_result
def dispatch(self, obj, action, *args, **kwargs):
"""Find action-specific method on self and call it."""
try:
method = getattr(obj, action)
except AttributeError:
method = getattr(obj, 'default')
return method(*args, **kwargs)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
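# For example, with a route like mapper.connect('/stacks/{stack_id}',
# controller=resource, action='show'), RoutesMiddleware places
# {'controller': resource, 'action': 'show', 'stack_id': '42'} into
# req.environ['wsgiorg.routing_args']; get_action_args() strips the
# 'controller' and 'format' keys, and dispatch() then ends up calling
# controller.show(request, stack_id='42'). (Route and values illustrative.)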
def log_exception(err, exc_info):
args = {'exc_info': exc_info} if cfg.CONF.debug else {}
LOG.error("Unexpected error occurred serving API: %s", err,
**args)
def translate_exception(exc, locale):
"""Translates all translatable elements of the given exception."""
if isinstance(exc, exception.HeatException):
exc.message = i18n.translate(exc.message, locale)
else:
err_msg = encodeutils.exception_to_unicode(exc)
exc.message = i18n.translate(err_msg, locale)
if isinstance(exc, webob.exc.HTTPError):
exc.explanation = i18n.translate(exc.explanation, locale)
exc.detail = i18n.translate(getattr(exc, 'detail', ''), locale)
return exc
class BasePasteFactory(object, metaclass=abc.ABCMeta):
"""A base class for paste app and filter factories.
Sub-classes must override the KEY class attribute and provide
a __call__ method.
"""
KEY = None
def __init__(self, conf):
self.conf = conf
@abc.abstractmethod
def __call__(self, global_conf, **local_conf):
return
def _import_factory(self, local_conf):
"""Import an app/filter class.
Lookup the KEY from the PasteDeploy local conf and import the
class named there. This class can then be used as an app or
filter factory.
Note we support the <module>:<class> format.
Note also that if you do e.g.
key =
value
then ConfigParser returns a value with a leading newline, so
we strip() the value before using it.
"""
class_name = local_conf[self.KEY].replace(':', '.').strip()
return importutils.import_class(class_name)
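# e.g. local_conf = {'heat.app_factory': 'heat.api.cfn.v1:API'} resolves
# to the class heat.api.cfn.v1.API; the ':' separator is normalised to
# '.' before the import. (Example taken from the AppFactory docstring.)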
class AppFactory(BasePasteFactory):
"""A Generic paste.deploy app factory.
This requires heat.app_factory to be set to a callable which returns a
WSGI app when invoked. The format of the name is <module>:<callable> e.g.
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
The WSGI app constructor must accept a ConfigOpts object and a local config
dict as its two arguments.
"""
KEY = 'heat.app_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.app_factory protocol method."""
factory = self._import_factory(local_conf)
return factory(self.conf, **local_conf)
class FilterFactory(AppFactory):
"""A Generic paste.deploy filter factory.
This requires heat.filter_factory to be set to a callable which returns a
WSGI filter when invoked. The format is <module>:<callable> e.g.
[filter:cache]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.middleware.cache:CacheFilter
The WSGI filter constructor must accept a WSGI app, a ConfigOpts object and
a local config dict as its three arguments.
"""
KEY = 'heat.filter_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.filter_factory protocol method."""
factory = self._import_factory(local_conf)
def filter(app):
return factory(app, self.conf, **local_conf)
return filter
def setup_paste_factories(conf):
"""Set up the generic paste app and filter factories.
Set things up so that:
paste.app_factory = heat.common.wsgi:app_factory
and
paste.filter_factory = heat.common.wsgi:filter_factory
work correctly while loading PasteDeploy configuration.
The app factories are constructed at runtime to allow us to pass a
ConfigOpts object to the WSGI classes.
:param conf: a ConfigOpts object
"""
global app_factory, filter_factory
app_factory = AppFactory(conf)
filter_factory = FilterFactory(conf)
def teardown_paste_factories():
"""Reverse the effect of setup_paste_factories()."""
global app_factory, filter_factory
del app_factory
del filter_factory
def paste_deploy_app(paste_config_file, app_name, conf):
"""Load a WSGI app from a PasteDeploy configuration.
Use deploy.loadapp() to load the app from the PasteDeploy configuration,
ensuring that the supplied ConfigOpts object is passed to the app and
filter constructors.
:param paste_config_file: a PasteDeploy config file
:param app_name: the name of the app/pipeline to load from the file
:param conf: a ConfigOpts object to supply to the app and its filters
:returns: the WSGI app
"""
setup_paste_factories(conf)
try:
return loadwsgi.loadapp("config:%s" % paste_config_file, name=app_name)
finally:
teardown_paste_factories()
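# Putting the pieces together (file path and app name illustrative; any
# PasteDeploy file defining an app per the AppFactory docstring works):
#
#   app = paste_deploy_app('/etc/heat/api-paste.ini', 'apiv1app', cfg.CONF)
#   server = Server('heat-api', cfg.CONF.heat_api)
#   server.start(app, default_port=8004)
#   server.wait()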
# -*- coding: utf-8 -*-
"""
XForms - Controllers
"""
module = request.controller
# -----------------------------------------------------------------------------
def create():
"""
Given a Table, returns an XForms to create an instance:
http://code.javarosa.org/wiki/buildxforms
http://www.w3schools.com/xforms/
http://oreilly.com/catalog/9780596003692/preview.html
Known field requirements that don't work properly:
IS_IN_DB
IS_NOT_ONE_OF
IS_EMAIL
IS_DATE_IN_RANGE
IS_DATETIME_IN_RANGE
"""
try:
tablename = request.args[0]
except IndexError:
session.error = T("Need to specify a table!")
redirect(URL(r=request))
title = tablename
table = s3db[tablename]
instance_list = []
bindings_list = []
controllers_list = []
itext_list = [TAG["text"](TAG["value"](s3.crud_strings[tablename].title_list),
_id="title")]
for fieldname in table.fields:
if fieldname in ["id", "created_on", "modified_on", "uuid", "mci",
"deleted", "created_by", "modified_by", "deleted_fk",
"owned_by_group", "owned_by_user"]:
# These will get added server-side
pass
elif table[fieldname].writable == False:
pass
else:
ref = "/" + title + "/" + fieldname
instance_list.append(generate_instance(table, fieldname))
bindings_list.append(generate_bindings(table, fieldname, ref))
controller, _itext_list = generate_controllers(table, fieldname, ref)
controllers_list.append(controller)
itext_list.extend(_itext_list)
#bindings_list.append(TAG["itext"](TAG["translation"](itext_list, _lang="eng")))
instance = TAG[title](instance_list, _xmlns="")
bindings = bindings_list
controllers = TAG["h:body"](controllers_list)
response.headers["Content-Type"] = "text/xml"
response.view = "xforms.xml"
return dict(title=title, instance=instance, bindings=bindings,
controllers=controllers, itext_list=itext_list)
# -----------------------------------------------------------------------------
def uses_requirement(requirement, field):
"""
Check if a given database field uses the specified requirement
(IS_IN_SET, IS_INT_IN_RANGE, etc)
"""
if hasattr(field.requires, "other") or requirement in str(field.requires):
if hasattr(field.requires, "other"):
if requirement in str(field.requires.other):
return True
elif requirement in str(field.requires):
return True
return False
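# Note: web2py commonly wraps validators as IS_EMPTY_OR(validator), in which
# case the wrapped validator is stored in the "other" attribute -- hence the
# hasattr(field.requires, "other") checks here and below. Illustrative:
#
#   field.requires = IS_EMPTY_OR(IS_IN_SET(("a", "b")))
#   uses_requirement("IS_IN_SET", field)   # True, found via requires.other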
# -----------------------------------------------------------------------------
def generate_instance(table, fieldname):
"""
Generates XML for the instance of the specified field.
"""
if table[fieldname].default:
instance = TAG[fieldname](table[fieldname].default)
else:
instance = TAG[fieldname]()
return instance
# -----------------------------------------------------------------------------
def generate_bindings(table, fieldname, ref):
"""
Generates the XML for bindings for the specified database field.
"""
field = table[fieldname]
if "IS_NOT_EMPTY" in str(field.requires):
required = "true()"
else:
required = "false()"
if field.type == "string":
_type = "string"
elif field.type == "double":
_type = "decimal"
# Collect doesn't support datetime yet
elif field.type == "date":
_type = "date"
elif field.type == "datetime":
_type = "datetime"
elif field.type == "integer":
_type = "int"
elif field.type == "boolean":
_type = "boolean"
elif field.type == "upload": # For images
_type = "binary"
elif field.type == "text":
_type = "text"
else:
# Unknown type
_type = "string"
if uses_requirement("IS_INT_IN_RANGE", field) \
or uses_requirement("IS_FLOAT_IN_RANGE", field):
if hasattr(field.requires, "other"):
maximum = field.requires.other.maximum
minimum = field.requires.other.minimum
else:
maximum = field.requires.maximum
minimum = field.requires.minimum
if minimum is None:
constraint = "(. < " + str(maximum) + ")"
elif maximum is None:
constraint = "(. > " + str(minimum) + ")"
else:
constraint = "(. > " + str(minimum) + " and . < " + str(maximum) + ")"
binding = TAG["bind"](_nodeset=ref,
_type=_type,
_required=required,
_constraint=constraint)
#elif uses_requirement("IS_DATETIME_IN_RANGE", field):
# pass
#elif uses_requirement("IS_EMAIL", field):
# pass
elif uses_requirement("IS_IN_SET", field):
binding = TAG["bind"](_nodeset=ref, _required=required)
else:
binding = TAG["bind"](_nodeset=ref, _type=_type, _required=required)
return binding
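# For an integer field with IS_INT_IN_RANGE(0, 120) this produces a binding
# along the lines of (nodeset shown for a hypothetical /person/age ref):
#
#   <bind nodeset="/person/age" type="int" required="false()"
#         constraint="(. > 0 and . < 120)"/>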
# -----------------------------------------------------------------------------
def generate_controllers(table, fieldname, ref):
"""
Generates the controllers XML for the database table field.
"""
itext_list = [] # Internationalization
field = table[fieldname]
itext_list.append(TAG["text"](TAG["value"](field.label),
_id=ref + ":label"))
itext_list.append(TAG["text"](TAG["value"](field.comment),
_id=ref + ":hint"))
if hasattr(field.requires, "option"):
items_list = []
for option in field.requires.theset:
items_list.append(TAG["item"](TAG["label"](option), TAG["value"](option)))
controllers_list.append(TAG["select1"](items_list, _ref=fieldname))
#elif uses_requirement("IS_IN_DB", field):
# ToDo (similar to IS_IN_SET)?
#pass
#elif uses_requirement("IS_NOT_ONE_OF", field):
# ToDo
#pass
elif uses_requirement("IS_IN_SET", field): # Defined below
if hasattr(field.requires, "other"):
insetrequires = field.requires.other
else:
insetrequires = field.requires
theset = insetrequires.theset
items_list = []
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
if theset:
option_num = 0 # for formatting something like "jr:itext('stuff:option0')"
for option in theset:
if field.type == "integer":
option = int(option)
option_ref = ref + ":option" + str(option_num)
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + option_ref + "')"),
TAG["value"](option)))
#itext_list.append(TAG["text"](TAG["value"](field.represent(option)), _id=option_ref))
itext_list.append(TAG["text"](TAG["value"](insetrequires.labels[theset.index(str(option))]),
_id=option_ref))
option_num += 1
if insetrequires.multiple:
controller = TAG["select"](items_list, _ref=ref)
else:
controller = TAG["select1"](items_list, _ref=ref)
elif field.type == "boolean": # Using select1, is there an easier way to do this?
items_list=[]
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
# True option
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option0')"),
TAG["value"](1)))
itext_list.append(TAG["text"](TAG["value"]("True"),
_id=ref + ":option0"))
# False option
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option1')"),
TAG["value"](0)))
itext_list.append(TAG["text"](TAG["value"]("False"),
_id=ref + ":option1"))
controller = TAG["select1"](items_list, _ref=ref)
elif field.type == "upload": # For uploading images
items_list=[]
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
controller = TAG["upload"](items_list, _ref=ref, _mediatype="image/*")
elif field.writable == False:
controller = TAG["input"](TAG["label"](field.label), _ref=ref,
_readonly="true",
_default=field.default.upper())
else:
# Normal Input field
controller = TAG["input"](TAG["label"](field.label), _ref=ref)
return controller, itext_list
# -----------------------------------------------------------------------------
def csvdata(nodelist):
"""
Returns the data in the given node as a comma separated string
"""
data = ""
for subnode in nodelist:
if (subnode.nodeType == subnode.ELEMENT_NODE):
try:
    data = data + "," + subnode.childNodes[0].data
except (IndexError, AttributeError):
    # element is empty (no text child) -- emit an empty column
    data = data + ","
return data[1:] + "\n"
# -----------------------------------------------------------------------------
def csvheader(parent, nodelist):
"""
Gives the header for the CSV
"""
header = ""
for subnode in nodelist:
if (subnode.nodeType == subnode.ELEMENT_NODE):
header = header + "," + parent + "." + subnode.tagName
return header[1:] + "\n"
# -----------------------------------------------------------------------------
def importxml(db, xmlinput):
"""
Converts the XML to a CSV compatible with the import_from_csv_file of web2py
@ToDo: rewrite this to go via S3Resource for proper Auth checking, Audit.
"""
import cStringIO
import xml.dom.minidom
try:
doc = xml.dom.minidom.parseString(xmlinput)
except Exception:
raise Exception("XML parse error")
parent = doc.childNodes[0].tagName
csvout = csvheader(parent, doc.childNodes[0].childNodes)
for subnode in doc.childNodes:
csvout = csvout + csvdata(subnode.childNodes)
fh = cStringIO.StringIO()
fh.write(csvout)
fh.seek(0, 0)
db[parent].import_from_csv_file(fh)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def post():
data = importxml(db, request.body.read())
return data
# -----------------------------------------------------------------------------
def formList():
"""
Generates a list of Xforms based on database tables for ODK Collect
http://code.google.com/p/opendatakit/
"""
# Test statements
#xml = TAG.forms(*[TAG.form(getName("Name"), _url = "http://" + request.env.http_host + URL(c="static", "current.xml"))])
#xml = TAG.forms(*[TAG.form(getName(t), _url = "http://" + request.env.http_host + URL(f="create", args=t)) for t in db.tables()])
# List of a couple simple tables to avoid a giant list of all the tables
#tables = ["pf_missing_report", "pr_presence"]
tables = ["irs_ireport", "rms_req", "cr_shelter", "pr_person", "pr_image"]
xml = TAG.forms()
for tablename in tables:
xml.append(TAG.form(get_name(tablename),
_url = "http://" + request.env.http_host + URL(f="create", args=tablename)))
response.headers["Content-Type"] = "text/xml"
response.view = "xforms.xml"
return xml
# -----------------------------------------------------------------------------
def submission():
"""
Allows for submission of Xforms by ODK Collect
http://code.google.com/p/opendatakit/
"""
# @ToDo: Something better than this crude check
if not auth.s3_logged_in():
auth.permission.fail()
try:
from cStringIO import StringIO # Faster, where available
except ImportError:
from StringIO import StringIO
import cgi
from lxml import etree
source = request.post_vars.get("xml_submission_file", None)
if isinstance(source, cgi.FieldStorage):
if source.filename:
xmlinput = source.file
else:
xmlinput = source.value
if isinstance(xmlinput, basestring):
xmlinput = StringIO(xmlinput)
else:
raise HTTP(400, "Invalid Request: Expected an XForm")
tree = etree.parse(xmlinput)
tablename = tree.getroot().tag
resource = s3db.resource(tablename)
stylesheet = os.path.join(request.folder, "static", "formats", "odk",
"import.xsl")
try:
result = resource.import_xml(source=tree, stylesheet=stylesheet)
except (IOError, SyntaxError):
raise HTTP(500, "Internal server error")
# Parse response
status = json.loads(result)["statuscode"]
if status == 200:
r = HTTP(201, "Saved") # ODK Collect only accepts 201
r.headers["Location"] = request.env.http_host
raise r
else:
raise HTTP(status, result)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(2)
def submission_old():
"""
Allows for submission of xforms by ODK Collect
http://code.google.com/p/opendatakit/
"""
response.headers["Content-Type"] = "text/xml"
xml = str(request.post_vars.xml_submission_file.value)
if len(xml) == 0:
raise HTTP(400, "Need some xml!")
importxml(db, xml)
r = HTTP(201, "Saved.")
r.headers["Location"] = request.env.http_host
raise r
# -----------------------------------------------------------------------------
def get_name(tablename):
"""
Generates a pretty(er) name from a database table name.
"""
return tablename[tablename.find("_") + 1:].replace("_", " ").capitalize()
# END =========================================================================
#!/usr/bin/env python
"""A variety of methods to solve ODE boundary value problems.
AUTHOR:
Jonathan Senning <jonathan.senning@gordon.edu>
Gordon College
Based Octave functions written in the spring of 1999
Python version: October-November 2008
"""
import numpy
#-----------------------------------------------------------------------------
def shoot( f, a, b, z1, z2, t, tol ):
"""Implements the shooting method to solve second order BVPs
USAGE:
y = shoot(f, a, b, z1, z2, t, tol)
INPUT:
f - function dy/dt = f(y,t). Since we are solving a second-
order boundary-value problem that has been transformed
into a first order system, this function should return a
1x2 array with the first entry equal to y and the second
entry equal to y'.
a - solution value at the left boundary: a = y(t[0]).
b - solution value at the right boundary: b = y(t[n-1]).
z1 - first initial estimate of y'(t[0]).
z2 - second initial estimate of y'(t[0]).
t - array of n time values to determine y at.
tol - allowable tolerance on right boundary: | b - y[n-1] | < tol
OUTPUT:
y - array of solution function values corresponding to the
values in the supplied array t.
NOTE:
This function assumes that the second order BVP has been converted to
a first order system of two equations. The secant method is used to
refine the initial values of y' used for the initial value problems.
"""
from diffeq import rk4
max_iter = 25 # Maximum number of shooting iterations
n = len( t ) # Determine the size of the arrays we will generate
# Compute solution to first initial value problem (IVP) with y'(a) = z1.
# Because we are using the secant method to refine our estimates of z =
# y', we don't really need all the solution of the IVP, just the last
# point of it -- this is saved in w1.
y = rk4( f, [a,z1], t )
w1 = y[n-1,0]
print "%2d: z = %10.3e, error = %10.3e" % ( 0, z1, b - w1 )
# Begin the main loop. We will compute the solution of a second IVP and
# then use the both solutions to refine our estimate of y'(a). This
# second solution then replaces the first and a new "second" solution is
# generated. This process continues until we either solve the problem to
# within the specified tolerance or we exceed the maximum number of
# allowable iterations.
for i in xrange( max_iter ):
# Solve second initial value problem, using y'(a) = z2. We need to
# retain the entire solution vector y since if y(t(n)) is close enough
# to b for us to stop then the first column of y becomes our solution
# vector.
y = rk4( f, [a,z2], t )
w2 = y[n-1,0]
print "%2d: z = %10.3e, error = %10.3e" % ( i+1, z2, b - w2 )
# Check to see if we are done...
if abs( b - w2 ) < tol:
break
# Compute the new approximations to the initial value of the first
# derivative. We compute z2 using a linear fit through (z1,w1) and
# (z2,w2) where w1 and w2 are the estimates at t=b of the initial
# value problems solved above with y1'(a) = z1 and y2'(a) = z2. The
# new value for z1 is the old value of z2.
#z1, z2 = ( z2, z1 + ( z2 - z1 ) / ( w2 - w1 ) * ( b - w1 ) )
z1, z2 = ( z2, z2 + ( z2 - z1 ) / ( w2 - w1 ) * ( b - w2 ) )
w1 = w2
# All done. Check to see if we really solved the problem, and then return
# the solution.
if abs( b - w2 ) >= tol:
print "\a**** ERROR ****"
print "Maximum number of iterations (%d) exceeded" % max_iter
print "Returned values may not have desired accuracy"
print "Error estimate of returned solution is %e" % ( b - w2 )
return y[:,0]
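# Note: the update above is the secant method applied to F(z) = y(b; z) - b,
# where y(b; z) is the terminal value of the IVP started with y'(a) = z:
#
#     z_new = z2 + (z2 - z1) / (w2 - w1) * (b - w2)
#
# When the ODE is linear in y (as in the demo below), F is linear in z, so a
# single secant step lands on the exact slope up to roundoff.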
#-----------------------------------------------------------------------------
def fd( u, v, w, t, a, b ):
"""Implements the shooting method to solve linear second order BVPs
Compute finite difference solution to the BVP
x'' = u(t) + v(t) x + w(t) x'
x(t[0]) = a, x(t[n-1]) = b
t should be passed in as an n element array. u, v, and w should be
either n element arrays corresponding to u(t), v(t) and w(t) or
scalars, in which case an n element array with the given value is
generated for each of them.
USAGE:
x = fd(u, v, w, t, a, b)
INPUT:
u,v,w - arrays containing u(t), v(t), and w(t) values. May be
specified as Python lists, NumPy arrays, or scalars. In
each case they are converted to NumPy arrays.
t - array of n time values to determine x at
a - solution value at the left boundary: a = x(t[0])
b - solution value at the right boundary: b = x(t[n-1])
OUTPUT:
x - array of solution function values corresponding to the
values in the supplied array t.
"""
# Get the dimension of t and make sure that t is an n-element vector
if type( t ) != numpy.ndarray:
if type( t ) == list:
t = numpy.array( t )
else:
t = numpy.array( [ float( t ) ] )
n = len( t )
# Make sure that u, v, and w are either scalars or n-element vectors.
# If they are scalars then we create vectors with the scalar value in
# each position.
if type( u ) == int or type( u ) == float:
u = numpy.array( [ float( u ) ] * n )
if type( v ) == int or type( v ) == float:
v = numpy.array( [ float( v ) ] * n )
if type( w ) == int or type( w ) == float:
w = numpy.array( [ float( w ) ] * n )
# Compute the stepsize. It is assumed that all elements in t are
# equally spaced.
h = t[1] - t[0];
# Construct tridiagonal system; boundary conditions appear as first and
# last equations in system.
A = -( 1.0 + w[1:n] * h / 2.0 )
A[-1] = 0.0
C = -( 1.0 - w[0:n-1] * h / 2.0 )
C[0] = 0.0
D = 2.0 + h * h * v
D[0] = D[n-1] = 1.0
B = - h * h * u
B[0] = a
B[n-1] = b
# Solve tridiagonal system
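# This is the Thomas algorithm: the forward elimination pass below uses the
# subdiagonal coefficients in A to update the diagonal D and right-hand side
# B, and the back substitution pass then recovers x from the last equation
# upward using the superdiagonal C. It runs in O(n) time rather than the
# O(n^3) of general Gaussian elimination.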
for i in xrange( 1, n ):
xmult = A[i-1] / D[i-1]
D[i] = D[i] - xmult * C[i-1]
B[i] = B[i] - xmult * B[i-1]
x = numpy.zeros( n )
x[n-1] = B[n-1] / D[n-1]
for i in xrange( n - 2, -1, -1 ):
x[i] = ( B[i] - C[i] * x[i+1] ) / D[i]
return x
#-----------------------------------------------------------------------------
if __name__ == "__main__":
import math
from pylab import *
# Solves x'' = x + 4exp(t), x(0)=1, x(1/2) = 2exp(1/2) using both the
# finite difference method and the shooting method.
# Set up interval. We will solve the problem for both n=64 and n=128.
a = 0.0
b = 0.5
n1 = 64
n2 = 128
t1 = linspace( a, b, n1 )
t2 = linspace( a, b, n2 )
# Compute the exact solution at the grid points so the errors of both the
# finite difference function fd() and the shooting method function shoot()
# can be plotted below.
def exact(t):
return exp(t) * ( 1 + 2 * t )
x1 = exact( t1 )
x2 = exact( t2 )
# Compute finite difference solutions
xfd1 = fd( 4 * exp( t1 ), 1, 0, t1, 1, 2 * exp( 0.5 ) )
xfd2 = fd( 4 * exp( t2 ), 1, 0, t2, 1, 2 * exp( 0.5 ) )
# Compute shooting method solutions
def f(x,t):
return array( [x[1], x[0]+4*exp(t)] )
xs1 = shoot( f, exp(a), 2*exp(b), 3.0, 4.0, t1, 1e-5 )
xs2 = shoot( f, exp(a), 2*exp(b), 3.0, 4.0, t2, 1e-5 )
# Prepare for display; set interactive mode to true so each plot
# is shown as it is generated
interactive( True )
# Plot solutions
plot( t1, xfd1, 'ro', t2, xfd2, 'b-' )
title( 'Finite Difference Method' )
xlabel( '$t$' )
ylabel( '$x$' )
legend( ( '%3d points' % n1, '%3d points' % n2 ), loc='lower right' )
draw()
z = raw_input( "Press ENTER to continue..." );
cla()
plot( t1, xs1, 'ro', t2, xs2, 'b-' )
title( 'Shooting Method' )
xlabel( '$t$' )
ylabel( '$x$' )
legend( ( '%3d points' % n1, '%3d points' % n2 ), loc='lower right' )
draw()
z = raw_input( "Press ENTER to continue..." )
# Plot errors
cla()
plot( t1, xfd1 - x1, 'ro', t2, xfd2 - x2, 'b-' )
title( 'Finite Difference Errors' )
xlabel( '$t$' )
ylabel( '$x$' )
legend( ( '%3d points' % n1, '%3d points' % n2 ), loc='center' )
draw()
z = raw_input( "Press ENTER to continue..." );
cla()
plot( t1, xs1 - x1, 'ro', t2, xs2 - x2, 'b-' )
title( 'Shooting Method Errors' )
xlabel( '$t$' )
ylabel( '$x$' )
legend( ( '%3d points' % n1, '%3d points' % n2 ), loc='center' )
draw()
z = raw_input( "Press ENTER to continue..." );
cla()
plot( t1, xfd1 - x1, 'ro-', t1, xs1 - x1, 'b-' )
# fiddle with ymin value so that shooting method error is not at
# bottom of window
win = list( axis() )
win[2] = win[2] - 0.1 * ( win[3] - win[2] );
axis( tuple( win ) )
title( 'Finite Difference and Shooting Method Errors: %d points' % n1 )
xlabel( '$t$' )
ylabel( '$x$' )
legend( ( 'Finite Differences', 'Shooting' ), loc='center' )
draw()
z = raw_input( "Press ENTER to quit..." );
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_scale_set_vm_run_commands_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetVMRunCommandsOperations:
"""VirtualMachineScaleSetVMRunCommandsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommand",
**kwargs: Any
) -> "_models.VirtualMachineRunCommand":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(run_command, 'VirtualMachineRunCommand')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommand",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineRunCommand"]:
"""The operation to create or update the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param run_command: Parameters supplied to the Create Virtual Machine RunCommand operation.
:type run_command: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommand
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineRunCommand or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommand]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
run_command=run_command,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommandUpdate",
**kwargs: Any
) -> "_models.VirtualMachineRunCommand":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(run_command, 'VirtualMachineRunCommandUpdate')
request = build_update_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommandUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineRunCommand"]:
"""The operation to update the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param run_command: Parameters supplied to the Update Virtual Machine RunCommand operation.
:type run_command: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommandUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineRunCommand or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommand]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
run_command=run_command,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to delete the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualMachineRunCommand":
"""The operation to get the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineRunCommand, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommand
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineRunCommandsListResult"]:
"""The operation to get all run commands of an instance in Virtual Machine Scaleset.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineRunCommandsListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineRunCommandsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommandsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineRunCommandsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands'} # type: ignore
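# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative; not part of the generated client). It
# assumes the async ComputeManagementClient exposes this operation group as
# `virtual_machine_scale_set_vm_run_commands` and that azure-identity is
# installed; all resource names below are placeholders.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.compute.aio import ComputeManagementClient
#
# async def _example():
#     credential = DefaultAzureCredential()
#     client = ComputeManagementClient(credential, "<subscription-id>")
#     poller = await client.virtual_machine_scale_set_vm_run_commands.begin_create_or_update(
#         resource_group_name="<rg>",
#         vm_scale_set_name="<vmss>",
#         instance_id="0",
#         run_command_name="hello",
#         run_command={"location": "<region>", "source": {"script": "echo hello"}},
#     )
#     run_command = await poller.result()
# ----------------------------------------------------------------------------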
| |
#!/usr/bin/python
# blastn tblastn blastx parser revised 14-1-2016.
# drosofff@gmail.com
import sys
import argparse
from collections import defaultdict
def Parser():
the_parser = argparse.ArgumentParser()
the_parser.add_argument('--blast', action="store", type=str, help="Path to the blast output (tabular format, 12 column)")
the_parser.add_argument('--sequences', action="store", type=str, help="Path to the fasta file with blasted sequences")
the_parser.add_argument('--fastaOutput', action="store", type=str, help="fasta output file of blast hits")
the_parser.add_argument('--tabularOutput', action="store", type=str, help="tabular output file of blast analysis")
the_parser.add_argument('--flanking', action="store", type=int, help="number of flanking nucleotides added to the hit sequences")
the_parser.add_argument('--mode', action="store", choices=["verbose", "short"], type=str, help="reporting (verbose) or not reporting (short) oases contigs")
the_parser.add_argument('--filter_relativeCov', action="store", type=float, default=0, help="filter out relative coverages below the specified ratio (float number)")
the_parser.add_argument('--filter_maxScore', action="store", type=float, default=0, help="filter out best BitScores below the specified float number")
the_parser.add_argument('--filter_meanScore', action="store", type=float, default=0, help="filter out mean BitScores below the specified float number")
the_parser.add_argument('--filter_term_in', action="store", type=str, default="", help="select the specified term in the subject list")
the_parser.add_argument('--filter_term_out', action="store", type=str, default="", help="exclude the specified term from the subject list")
the_parser.add_argument('--al_sequences', action="store", type=str, help="sequences that have been blast aligned")
the_parser.add_argument('--un_sequences', action="store", type=str, help="sequences that have not been blast aligned")
the_parser.add_argument('--dataset_name', action="store", type=str, default="", help="the name of the dataset that has been parsed, to be reported in the output")
args = the_parser.parse_args()
if not all ( (args.sequences, args.blast, args.fastaOutput, args.tabularOutput) ):
the_parser.error('argument(s) missing, call the -h option of the script')
if not args.flanking:
args.flanking = 0
return args
def median(lst):
lst = sorted(lst)
if len(lst) < 1:
return None
if len(lst) %2 == 1:
return lst[((len(lst)+1)/2)-1]
if len(lst) %2 == 0:
return float(sum(lst[(len(lst)/2)-1:(len(lst)/2)+1]))/2.0
def mean(lst):
if len(lst) < 1:
return 0
return sum(lst) / float(len(lst))
def getfasta (fastafile):
fastadic = {}
for line in open (fastafile):
if line[0] == ">":
header = line[1:-1]
fastadic[header] = ""
else:
fastadic[header] += line
for header in fastadic:
fastadic[header] = "".join(fastadic[header].split("\n"))
return fastadic
def insert_newlines(string, every=60):
lines = []
for i in xrange(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
def getblast (blastfile):
'''blastinfo [0] Percentage of identical matches
blastinfo [1] Alignment length
blastinfo [2] Number of mismatches
blastinfo [3] Number of gap openings
blastinfo [4] Start of alignment in query
blastinfo [5] End of alignment in query
blastinfo [6] Start of alignment in subject (database hit)
blastinfo [7] End of alignment in subject (database hit)
blastinfo [8] Expectation value (E-value)
blastinfo [9] Bit score
blastinfo [10] Subject length (NEEDS TO BE SPECIFIED WHEN RUNNING BLAST) '''
blastdic = defaultdict (dict)
for line in open (blastfile):
fields = line[:-1].split("\t")
transcript = fields[0]
subject = fields[1]
blastinfo = [float(fields[2]) ] # blastinfo[0]
blastinfo = blastinfo + [int(i) for i in fields[3:10] ] # blastinfo[1:8], indices 1 to 7
blastinfo.append(fields[10]) # blastinfo[8] E-value remains as a string type
blastinfo.append(float(fields[11])) # blastinfo[9] Bit score
blastinfo.append(int(fields[12])) # blastinfo[10] Subject length MUST BE RETRIEVED THROUGH A 13 COLUMN BLAST OUTPUT
try:
blastdic[subject][transcript].append(blastinfo)
except KeyError:
blastdic[subject][transcript] = [ blastinfo ]
return blastdic
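# Note (assumption about upstream usage): a 13-column table like the one
# parsed above can be produced with BLAST+ by appending the subject length
# to the standard 12 columns, e.g.:
#   blastn -query contigs.fa -db subjects -outfmt '6 std slen' -out hits.tab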
def getseq (fastadict, transcript, up, down, orientation="direct"):
def reverse (seq):
revdict = {"A":"T","T":"A","G":"C","C":"G","N":"N"}
revseq = [revdict[i] for i in seq[::-1]]
return "".join(revseq)
pickseq = fastadict[transcript][up-1:down]
if orientation == "direct":
return pickseq
else:
return reverse(pickseq)
def subjectCoverage (fastadict, blastdict, subject, QueriesFlankingNucleotides=0):
SubjectCoverageList = []
HitDic = {}
bitScores = []
for transcript in blastdict[subject]:
prefix = "%s--%s_" % (subject, transcript)
hitNumber = 0
for hit in blastdict[subject][transcript]:
hitNumber += 1
suffix = "hit%s_IdMatch=%s,AligLength=%s,E-val=%s" % (hitNumber, hit[0], hit[1], hit[8])
HitDic[prefix+suffix] = GetHitSequence (fastadict, transcript, hit[4], hit[5], QueriesFlankingNucleotides) #query coverage by a hit is in hit[4:6]
SubjectCoverageList += range (min([hit[6], hit[7]]), max([hit[6], hit[7]]) + 1) # subject coverage by a hit is in hit[6:8]
bitScores.append(hit[9])
subjectLength = hit [10] # always the same value for a given subject. Stupid but simple
TotalSubjectCoverage = len ( set (SubjectCoverageList) )
RelativeSubjectCoverage = TotalSubjectCoverage/float(subjectLength)
return HitDic, subjectLength, TotalSubjectCoverage, RelativeSubjectCoverage, max(bitScores), mean(bitScores)
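# Worked example (illustrative): two hits covering subject positions 10-50
# and 40-80 of a 100 nt subject yield set(range(10, 51)) | set(range(40, 81)),
# i.e. TotalSubjectCoverage = 71 distinct positions and
# RelativeSubjectCoverage = 71 / 100.0 = 0.71.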
def GetHitSequence (fastadict, FastaHeader, leftCoordinate, rightCoordinate, FlankingValue):
if rightCoordinate > leftCoordinate:
polarity = "direct"
else:
polarity = "reverse"
leftCoordinate, rightCoordinate = rightCoordinate, leftCoordinate
if leftCoordinate - FlankingValue > 0:
leftCoordinate -= FlankingValue
else:
leftCoordinate = 1
return getseq (fastadict, FastaHeader, leftCoordinate, rightCoordinate, polarity)
def outputParsing (dataset_name, F, Fasta, results, Xblastdict, fastadict, filter_relativeCov=0, filter_maxScore=0, filter_meanScore=0, filter_term_in="", filter_term_out="", mode="verbose"):
def filter_results (results, filter_relativeCov=0, filter_maxScore=0, filter_meanScore=0, filter_term_in="", filter_term_out=""):
for subject in results.keys():
if results[subject]["RelativeSubjectCoverage"]<filter_relativeCov:
del results[subject]
continue
if results[subject]["maxBitScores"]<filter_maxScore:
del results[subject]
continue
if results[subject]["meanBitScores"]<filter_meanScore:
del results[subject]
continue
if filter_term_in not in subject:
del results[subject]
continue
if filter_term_out and filter_term_out in subject:
del results[subject]
continue
return results
F= open(F, "w")
Fasta=open(Fasta, "w")
blasted_transcripts = []
filter_results (results, filter_relativeCov, filter_maxScore, filter_meanScore, filter_term_in, filter_term_out)
for subject in results:
for transcript in Xblastdict[subject]:
blasted_transcripts.append(transcript)
blasted_transcripts = list( set( blasted_transcripts))
if mode == "verbose":
print >>F, "--- %s ---" % (dataset_name)
print >>F, "# SeqId\t%Identity\tAlignLength\tStartSubject\tEndSubject\t%QueryHitCov\tE-value\tBitScore"
for subject in sorted (results, key=lambda x: results[x]["meanBitScores"], reverse=True):
print >> F, " \n# %s" % subject
print >> F, "# Suject Length: %s" % (results[subject]["subjectLength"])
print >> F, "# Total Subject Coverage: %s" % (results[subject]["TotalCoverage"])
print >> F, "# Relative Subject Coverage: %s" % (results[subject]["RelativeSubjectCoverage"])
print >> F, "# Best Bit Score: %s" % (results[subject]["maxBitScores"])
print >> F, "# Mean Bit Score: %s" % (results[subject]["meanBitScores"])
for header in results[subject]["HitDic"]:
print >> Fasta, ">%s\n%s" % (header, insert_newlines(results[subject]["HitDic"][header]) )
print >> Fasta, "" # final carriage return for the sequence
for transcript in Xblastdict[subject]:
transcriptSize = float(len(fastadict[transcript]))
for hit in Xblastdict[subject][transcript]:
percentIdentity, alignLength, subjectStart, subjectEnd, queryCov = hit[0], hit[1], hit[6], hit[7], "%.1f" % (abs(hit[5]-hit[4])/transcriptSize*100)
Eval, BitScore = hit[8], hit[9]
info = [transcript] + [percentIdentity, alignLength, subjectStart, subjectEnd, queryCov, Eval, BitScore]
info = [str(i) for i in info]
info = "\t".join(info)
print >> F, info
else:
print >>F, "--- %s ---" % (dataset_name)
print >>F, "# subject\tsubject length\tTotal Subject Coverage\tRelative Subject Coverage\tBest Bit Score\tMean Bit Score"
for subject in sorted (results, key=lambda x: results[x]["meanBitScores"], reverse=True):
line = []
line.append(subject)
line.append(results[subject]["subjectLength"])
line.append(results[subject]["TotalCoverage"])
line.append(results[subject]["RelativeSubjectCoverage"])
line.append(results[subject]["maxBitScores"])
line.append(results[subject]["meanBitScores"])
line = [str(i) for i in line]
print >> F, "\t".join(line)
for header in results[subject]["HitDic"]:
print >> Fasta, ">%s\n%s" % (header, insert_newlines(results[subject]["HitDic"][header]) )
print >> Fasta, "" # final carriage return for the sequence
F.close()
Fasta.close()
return blasted_transcripts
def dispatch_sequences (fastadict, blasted_transcripts, matched_sequences, unmatched_sequences):
'''to output the sequences that matched and did not match in the blast'''
F_matched = open (matched_sequences, "w")
F_unmatched = open (unmatched_sequences, "w")
for transcript in fastadict:
if transcript in blasted_transcripts: # the list of blasted_transcripts is generated by the outputParsing function
print >> F_matched, ">%s\n%s" % (transcript, insert_newlines(fastadict[transcript]) )
else:
print >> F_unmatched, ">%s\n%s" % (transcript, insert_newlines(fastadict[transcript]) )
F_matched.close()
F_unmatched.close()
return
def __main__ ():
args = Parser()
fastadict = getfasta (args.sequences)
Xblastdict = getblast (args.blast)
results = defaultdict(dict)
for subject in Xblastdict:
results[subject]["HitDic"], results[subject]["subjectLength"], results[subject]["TotalCoverage"], results[subject]["RelativeSubjectCoverage"], results[subject]["maxBitScores"], results[subject]["meanBitScores"] = subjectCoverage(fastadict, Xblastdict, subject, args.flanking)
blasted_transcripts = outputParsing (args.dataset_name, args.tabularOutput, args.fastaOutput, results, Xblastdict, fastadict,
filter_relativeCov=args.filter_relativeCov, filter_maxScore=args.filter_maxScore,
filter_meanScore=args.filter_meanScore, filter_term_in=args.filter_term_in,
filter_term_out=args.filter_term_out, mode=args.mode)
dispatch_sequences (fastadict, blasted_transcripts, args.al_sequences, args.un_sequences)
if __name__=="__main__": __main__()
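# A hedged command-line sketch (the script and file names are placeholders):
#   python blast_parser.py --blast hits.tab --sequences contigs.fa \
#       --fastaOutput hits.fa --tabularOutput report.tab --flanking 0 \
#       --mode verbose --al_sequences matched.fa --un_sequences unmatched.fa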
| |
"""
SoftLayer.dedicatedhost
~~~~~~~~~~~~~~~~~~~~~~~
DH Manager/helpers
:license: MIT, see License for more details.
"""
import logging
import SoftLayer
from SoftLayer.managers import ordering
from SoftLayer import utils
# Invalid names are ignored due to long method names and short argument names
# pylint: disable=invalid-name, no-self-use
LOGGER = logging.getLogger(__name__)
class DedicatedHostManager(utils.IdentifierMixin, object):
"""Manages SoftLayer Dedicated Hosts.
See product information here https://www.ibm.com/cloud/dedicated
:param SoftLayer.API.BaseClient client: the client instance
:param SoftLayer.managers.OrderingManager ordering_manager: an optional manager to handle ordering.
If none is provided, one will be auto initialized.
"""
def __init__(self, client, ordering_manager=None):
self.client = client
self.account = client['Account']
self.host = client['Virtual_DedicatedHost']
self.guest = client['Virtual_Guest']
if ordering_manager is None:
self.ordering_manager = ordering.OrderingManager(client)
else:
self.ordering_manager = ordering_manager
def cancel_host(self, host_id):
"""Cancel a dedicated host immediately, it fails if there are still guests in the host.
:param host_id: The ID of the dedicated host to be cancelled.
:return: True on success or an exception
Example::
# Cancels dedicated host id 12345
result = mgr.cancel_host(12345)
"""
return self.host.deleteObject(id=host_id)
def cancel_guests(self, host_id):
"""Cancel all guests on the dedicated host immediately.
To cancel a specific guest use the method VSManager.cancel_instance()
:param host_id: The ID of the dedicated host.
:return: A list of dictionaries with the id, fqdn and status of each guest.
The status is either 'Cancelled' or an exception message; the list
is empty if there are no guests on the dedicated host.
Example::
# Cancel guests of dedicated host id 12345
result = mgr.cancel_guests(12345)
"""
result = []
guests = self.host.getGuests(id=host_id, mask='id,fullyQualifiedDomainName')
if guests:
for vs in guests:
status_info = {
'id': vs['id'],
'fqdn': vs['fullyQualifiedDomainName'],
'status': self._delete_guest(vs['id'])
}
result.append(status_info)
return result
def list_guests(self, host_id, tags=None, cpus=None, memory=None, hostname=None,
domain=None, local_disk=None, nic_speed=None, public_ip=None,
private_ip=None, **kwargs):
"""Retrieve a list of all virtual servers on the dedicated host.
Example::
# Print out a list of instances with 4 cpu cores in the host id 12345.
for vsi in mgr.list_guests(host_id=12345, cpus=4):
print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']
# Using a custom object-mask. Will get ONLY what is specified
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
for vsi in mgr.list_guests(mask=object_mask,cpus=4):
print vsi
:param integer host_id: the identifier of dedicated host
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string local_disk: filter based on local_disk
:param integer nic_speed: filter based on network speed (in MBPS)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
virtual servers
"""
if 'mask' not in kwargs:
items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'lastKnownPowerState.name',
'hourlyBillingFlag',
'powerState',
'maxCpu',
'maxMemory',
'datacenter',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
kwargs['mask'] = "mask[%s]" % ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['guests']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if cpus:
_filter['guests']['maxCpu'] = utils.query_filter(cpus)
if memory:
_filter['guests']['maxMemory'] = utils.query_filter(memory)
if hostname:
_filter['guests']['hostname'] = utils.query_filter(hostname)
if domain:
_filter['guests']['domain'] = utils.query_filter(domain)
if local_disk is not None:
_filter['guests']['localDiskFlag'] = (
utils.query_filter(bool(local_disk)))
if nic_speed:
_filter['guests']['networkComponents']['maxSpeed'] = (
utils.query_filter(nic_speed))
if public_ip:
_filter['guests']['primaryIpAddress'] = (
utils.query_filter(public_ip))
if private_ip:
_filter['guests']['primaryBackendIpAddress'] = (
utils.query_filter(private_ip))
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.host.getGuests(id=host_id, **kwargs)
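# For illustration (values assumed): a call such as list_guests(12345, cpus=4)
# sends an object filter roughly equivalent to
# {'guests': {'maxCpu': {'operation': 4}}} along with the default mask above.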
def list_instances(self, tags=None, cpus=None, memory=None, hostname=None,
disk=None, datacenter=None, **kwargs):
"""Retrieve a list of all dedicated hosts on the account
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string disk: filter based on disk
:param string datacenter: filter based on datacenter
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching dedicated host.
"""
if 'mask' not in kwargs:
items = [
'id',
'name',
'cpuCount',
'diskCapacity',
'memoryCapacity',
'datacenter',
'guestCount',
]
kwargs['mask'] = "mask[%s]" % ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['dedicatedHosts']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if hostname:
_filter['dedicatedHosts']['name'] = (
utils.query_filter(hostname)
)
if cpus:
_filter['dedicatedHosts']['cpuCount'] = utils.query_filter(cpus)
if disk:
_filter['dedicatedHosts']['diskCapacity'] = (
utils.query_filter(disk))
if memory:
_filter['dedicatedHosts']['memoryCapacity'] = (
utils.query_filter(memory))
if datacenter:
_filter['dedicatedHosts']['datacenter']['name'] = (
utils.query_filter(datacenter))
kwargs['filter'] = _filter.to_dict()
return self.account.getDedicatedHosts(**kwargs)
def get_host(self, host_id, **kwargs):
"""Get details about a dedicated host.
:param integer host_id: the host ID
:returns: A dictionary containing host information.
Example::
# Print out host ID 12345.
dh = mgr.get_host(12345)
print dh
# Print out only name and backendRouter for instance 12345
object_mask = "mask[name,backendRouter[id]]"
dh = mgr.get_host(12345, mask=object_mask)
print dh
"""
if 'mask' not in kwargs:
kwargs['mask'] = ('''
id,
name,
cpuCount,
memoryCapacity,
diskCapacity,
createDate,
modifyDate,
backendRouter[
id,
hostname,
domain
],
billingItem[
id,
nextInvoiceTotalRecurringAmount,
children[
categoryCode,
nextInvoiceTotalRecurringAmount
],
orderItem[
id,
order.userRecord[
username
]
]
],
datacenter[
id,
name,
longName
],
guests[
id,
hostname,
domain,
uuid
],
guestCount
''')
return self.host.getObject(id=host_id, **kwargs)
def place_order(self, hostname, domain, location, flavor, hourly, router=None):
"""Places an order for a dedicated host.
See get_create_options() for valid arguments.
:param string hostname: server hostname
:param string domain: server domain name
:param string location: location (datacenter) name
:param boolean hourly: True if using hourly pricing (default).
False for monthly.
:param int router: an optional value for selecting a backend router
"""
create_options = self._generate_create_dict(hostname=hostname,
router=router,
domain=domain,
flavor=flavor,
datacenter=location,
hourly=hourly)
return self.client['Product_Order'].placeOrder(create_options)
def verify_order(self, hostname, domain, location, hourly, flavor, router=None):
"""Verifies an order for a dedicated host.
See :func:`place_order` for a list of available options.
"""
create_options = self._generate_create_dict(hostname=hostname,
router=router,
domain=domain,
flavor=flavor,
datacenter=location,
hourly=hourly)
return self.client['Product_Order'].verifyOrder(create_options)
def _generate_create_dict(self,
hostname=None,
domain=None,
flavor=None,
router=None,
datacenter=None,
hourly=True):
"""Translates args into a dictionary for creating a dedicated host."""
package = self._get_package()
item = self._get_item(package, flavor)
location = self._get_location(package['regions'], datacenter)
price = self._get_price(item)
routers = self._get_backend_router(
location['location']['locationPackageDetails'], item)
router = self._get_default_router(routers, router)
hardware = {
'hostname': hostname,
'domain': domain,
'primaryBackendNetworkComponent': {
'router': {
'id': router
}
}
}
complex_type = "SoftLayer_Container_Product_Order_Virtual_DedicatedHost"
order = {
"complexType": complex_type,
"quantity": 1,
'location': location['keyname'],
'packageId': package['id'],
'prices': [{'id': price}],
'hardware': [hardware],
'useHourlyPricing': hourly,
}
return order
def _get_package(self):
"""Get the package related to simple dedicated host ordering."""
mask = '''
items[
id,
description,
prices,
capacity,
keyName,
itemCategory[categoryCode],
bundleItems[capacity,keyName,categories[categoryCode],hardwareGenericComponentModel[id,
hardwareComponentType[keyName]]]
],
regions[location[location[priceGroups]]]
'''
package_keyname = 'DEDICATED_HOST'
package = self.ordering_manager.get_package_by_key(package_keyname, mask=mask)
return package
def _get_location(self, regions, datacenter):
"""Get the longer key with a short location(datacenter) name."""
for region in regions:
# list of locations
if region['location']['location']['name'] == datacenter:
return region
raise SoftLayer.SoftLayerError("Could not find valid location for: '%s'" % datacenter)
def get_create_options(self):
"""Returns valid options for ordering a dedicated host."""
package = self._get_package()
# Locations
locations = []
for region in package['regions']:
locations.append({
'name': region['location']['location']['longName'],
'key': region['location']['location']['name'],
})
# flavors
dedicated_host = []
for item in package['items']:
if item['itemCategory']['categoryCode'] == \
'dedicated_virtual_hosts':
dedicated_host.append({
'name': item['description'],
'key': item['keyName'],
})
return {'locations': locations, 'dedicated_host': dedicated_host}
def _get_price(self, item):
"""Returns a valid price id for ordering a dedicated host item."""
for price in item['prices']:
if not price.get('locationGroupId'):
return price['id']
raise SoftLayer.SoftLayerError("Could not find valid price")
def _get_item(self, package, flavor):
"""Returns the item for ordering a dedicated host."""
for item in package['items']:
if item['keyName'] == flavor:
return item
raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor)
def _get_backend_router(self, locations, item):
"""Returns valid router options for ordering a dedicated host."""
mask = '''
id,
hostname
'''
cpu_count = item['capacity']
for capacity in item['bundleItems']:
for category in capacity['categories']:
if category['categoryCode'] == 'dedicated_host_ram':
mem_capacity = capacity['capacity']
if category['categoryCode'] == 'dedicated_host_disk':
disk_capacity = capacity['capacity']
for hardwareComponent in item['bundleItems']:
if hardwareComponent['keyName'].find("GPU") != -1:
hardwareComponentType = hardwareComponent['hardwareGenericComponentModel']['hardwareComponentType']
gpuComponents = [
{
'hardwareComponentModel': {
'hardwareGenericComponentModel': {
'id': hardwareComponent['hardwareGenericComponentModel']['id'],
'hardwareComponentType': {
'keyName': hardwareComponentType['keyName']
}
}
}
},
{
'hardwareComponentModel': {
'hardwareGenericComponentModel': {
'id': hardwareComponent['hardwareGenericComponentModel']['id'],
'hardwareComponentType': {
'keyName': hardwareComponentType['keyName']
}
}
}
}
]
if locations is not None:
for location in locations:
if location['locationId'] is not None:
loc_id = location['locationId']
host = {
'cpuCount': cpu_count,
'memoryCapacity': mem_capacity,
'diskCapacity': disk_capacity,
'datacenter': {
'id': loc_id
}
}
if item['keyName'].find("GPU") != -1:
host['pciDevices'] = gpuComponents
routers = self.host.getAvailableRouters(host, mask=mask)
return routers
raise SoftLayer.SoftLayerError("Could not find available routers")
def _get_default_router(self, routers, router_name=None):
"""Returns the default router for ordering a dedicated host."""
if router_name is None:
for router in routers:
if router['id'] is not None:
return router['id']
else:
for router in routers:
if router['hostname'] == router_name:
return router['id']
raise SoftLayer.SoftLayerError("Could not find valid default router")
def get_router_options(self, datacenter=None, flavor=None):
"""Returns available backend routers for the dedicated host."""
package = self._get_package()
location = self._get_location(package['regions'], datacenter)
item = self._get_item(package, flavor)
return self._get_backend_router(location['location']['locationPackageDetails'], item)
def _delete_guest(self, guest_id):
"""Deletes a guest and returns 'Cancelled' or and Exception message"""
msg = 'Cancelled'
try:
self.guest.deleteObject(id=guest_id)
except SoftLayer.SoftLayerAPIError as e:
msg = 'Exception: ' + e.faultString
return msg
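# Hedged usage sketch (illustrative; not part of the manager). It assumes a
# client created from the environment; the flavor and datacenter key names
# below are placeholders of the kind returned by get_create_options().
#
# import SoftLayer
# client = SoftLayer.create_client_from_env()
# mgr = DedicatedHostManager(client)
# options = mgr.get_create_options()
# result = mgr.verify_order(hostname='host01', domain='example.com',
#                           location='dal13', hourly=True,
#                           flavor='56_CORES_X_242_RAM_X_1_4_TB')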
| |
## @file
# process FV generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import subprocess
import StringIO
from struct import *
import Ffs
import AprioriSection
from GenFdsGlobalVariable import GenFdsGlobalVariable
from GenFds import GenFds
from CommonDataClass.FdfClass import FvClassObject
from Common.Misc import SaveFileOnChange
from Common.LongFilePathSupport import CopyLongFilePath
from Common.LongFilePathSupport import OpenLongFilePath as open
T_CHAR_LF = '\n'
FV_UI_EXT_ENTY_GUID = 'A67DF1FA-8DE8-4E98-AF09-4BDF2EFFBC7C'
## generate FV
#
#
class FV (FvClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FvClassObject.__init__(self)
self.FvInfFile = None
self.FvAddressFile = None
self.BaseAddress = None
self.InfFileName = None
self.FvAddressFileName = None
self.CapsuleName = None
self.FvBaseAddress = None
self.FvForceRebase = None
self.FvRegionInFD = None
## AddToBuffer()
#
# Generate Fv and add it to the Buffer
#
# @param self The object pointer
# @param Buffer The buffer the generated FV data will be put into
# @param BaseAddress base address of FV
# @param BlockSize block size of FV
# @param BlockNum How many blocks in FV
# @param ErasePolarity Flash erase polarity
# @param VtfDict VTF objects
# @param MacroDict macro value pair
# @retval string Generated FV file path
#
def AddToBuffer (self, Buffer, BaseAddress=None, BlockSize= None, BlockNum=None, ErasePolarity='1', VtfDict=None, MacroDict = {}) :
if BaseAddress == None and self.UiFvName.upper() + 'fv' in GenFds.ImageBinDict.keys():
return GenFds.ImageBinDict[self.UiFvName.upper() + 'fv']
#
# Check whether this FV, referenced from a Capsule, is also placed in an FD
# flash region. If so, report an error: an FV used in a Capsule image may
# not also live in an FD flash region.
#
if self.CapsuleName != None:
for FdName in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[FdName]
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType == 'FV':
for RegionData in RegionObj.RegionDataList:
if RegionData.endswith(".fv"):
continue
elif RegionData.upper() + 'fv' in GenFds.ImageBinDict.keys():
continue
elif self.UiFvName.upper() == RegionData.upper():
GenFdsGlobalVariable.ErrorLogger("Capsule %s in FD region can't contain a FV %s in FD region." % (self.CapsuleName, self.UiFvName.upper()))
GenFdsGlobalVariable.InfLogger( "\nGenerating %s FV" %self.UiFvName)
GenFdsGlobalVariable.LargeFileInFvFlags.append(False)
FFSGuid = None
if self.FvBaseAddress != None:
BaseAddress = self.FvBaseAddress
self.__InitializeInf__(BaseAddress, BlockSize, BlockNum, ErasePolarity, VtfDict)
#
# First Process the Apriori section
#
MacroDict.update(self.DefineVarDict)
GenFdsGlobalVariable.VerboseLogger('First generate Apriori file !')
FfsFileList = []
for AprSection in self.AprioriSectionList:
FileName = AprSection.GenFfs (self.UiFvName, MacroDict)
FfsFileList.append(FileName)
# Add Apriori file name to Inf file
self.FvInfFile.writelines("EFI_FILE_NAME = " + \
FileName + \
T_CHAR_LF)
# Process Modules in FfsList
for FfsFile in self.FfsList :
FileName = FfsFile.GenFfs(MacroDict, FvParentAddr=BaseAddress)
FfsFileList.append(FileName)
self.FvInfFile.writelines("EFI_FILE_NAME = " + \
FileName + \
T_CHAR_LF)
SaveFileOnChange(self.InfFileName, self.FvInfFile.getvalue(), False)
self.FvInfFile.close()
#
# Call GenFv tool
#
FvOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiFvName)
FvOutputFile = FvOutputFile + '.Fv'
# BUGBUG: FvOutputFile could be specified from FDF file (FV section, CreateFile statement)
if self.CreateFileName != None:
FvOutputFile = self.CreateFileName
FvInfoFileName = os.path.join(GenFdsGlobalVariable.FfsDir, self.UiFvName + '.inf')
CopyLongFilePath(GenFdsGlobalVariable.FvAddressFileName, FvInfoFileName)
OrigFvInfo = None
if os.path.exists (FvInfoFileName):
OrigFvInfo = open(FvInfoFileName, 'r').read()
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID;
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
NewFvInfo = None
if os.path.exists (FvInfoFileName):
NewFvInfo = open(FvInfoFileName, 'r').read()
if NewFvInfo != None and NewFvInfo != OrigFvInfo:
FvChildAddr = []
AddFileObj = open(FvInfoFileName, 'r')
AddrStrings = AddFileObj.readlines()
AddrKeyFound = False
for AddrString in AddrStrings:
if AddrKeyFound:
#get base address for the inside FvImage
FvChildAddr.append (AddrString)
elif AddrString.find ("[FV_BASE_ADDRESS]") != -1:
AddrKeyFound = True
AddFileObj.close()
if FvChildAddr != []:
# Update Ffs again
for FfsFile in self.FfsList :
FileName = FfsFile.GenFfs(MacroDict, FvChildAddr, BaseAddress)
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID;
#Update GenFv again
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
#
# Write the Fv contents to Buffer
#
if os.path.isfile(FvOutputFile):
FvFileObj = open ( FvOutputFile,'rb')
GenFdsGlobalVariable.VerboseLogger( "\nGenerate %s FV Successfully" %self.UiFvName)
GenFdsGlobalVariable.SharpCounter = 0
Buffer.write(FvFileObj.read())
FvFileObj.seek(0)
# PI FvHeader is 0x48 byte
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
FvAlignmentValue = 1 << (ord (FvHeaderBuffer[0x2E]) & 0x1F)
# FvAlignmentValue is larger than or equal to 1K
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x10000:
#The max alignment supported by FFS is 64K.
self.FvAlignment = "64K"
else:
self.FvAlignment = str (FvAlignmentValue / 0x400) + "K"
else:
# FvAlignmentValue is less than 1K
self.FvAlignment = str (FvAlignmentValue)
FvFileObj.close()
GenFds.ImageBinDict[self.UiFvName.upper() + 'fv'] = FvOutputFile
GenFdsGlobalVariable.LargeFileInFvFlags.pop()
else:
GenFdsGlobalVariable.ErrorLogger("Failed to generate %s FV file." %self.UiFvName)
return FvOutputFile
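    # A hedged, stdlib-only sketch (not used by the tool) of the alignment
    # read above, assuming the PI layout where the 32-bit Attributes field of
    # EFI_FIRMWARE_VOLUME_HEADER starts at offset 0x2C: byte 0x2E carries
    # attribute bits 16..23, and bits 16..20 encode log2(alignment).
    #
    #     import struct
    #     def ReadFvAlignment(FvPath):
    #         Header = open(FvPath, 'rb').read(0x48)  # fixed PI header size
    #         Attributes = struct.unpack_from('<L', Header, 0x2C)[0]
    #         return 1 << ((Attributes >> 16) & 0x1F)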
## _GetBlockSize()
#
# Calculate FV's block size
# Inherit block size from FD if no block size specified in FV
#
def _GetBlockSize(self):
if self.BlockSizeList:
return True
for FdName in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[FdName]
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType != 'FV':
continue
for RegionData in RegionObj.RegionDataList:
#
# Found the FD and region that contain this FV
#
if self.UiFvName.upper() == RegionData.upper():
RegionObj.BlockInfoOfRegion(FdObj.BlockSizeList, self)
if self.BlockSizeList:
return True
return False
## __InitializeInf__()
#
    # Initialize the inf file to create FV
#
# @param self The object pointer
# @param BaseAddress base address of FV
# @param BlockSize block size of FV
# @param BlockNum How many blocks in FV
# @param ErasePolarity Flash erase polarity
# @param VtfDict VTF objects
#
    def __InitializeInf__ (self, BaseAddress=None, BlockSize=None, BlockNum=None, ErasePolarity='1', VtfDict=None):
#
# Create FV inf file
#
self.InfFileName = os.path.join(GenFdsGlobalVariable.FvDir,
self.UiFvName + '.inf')
self.FvInfFile = StringIO.StringIO()
#
# Add [Options]
#
self.FvInfFile.writelines("[options]" + T_CHAR_LF)
if BaseAddress != None :
self.FvInfFile.writelines("EFI_BASE_ADDRESS = " + \
BaseAddress + \
T_CHAR_LF)
if BlockSize != None:
self.FvInfFile.writelines("EFI_BLOCK_SIZE = " + \
'0x%X' %BlockSize + \
T_CHAR_LF)
if BlockNum != None:
self.FvInfFile.writelines("EFI_NUM_BLOCKS = " + \
' 0x%X' %BlockNum + \
T_CHAR_LF)
else:
if self.BlockSizeList == []:
if not self._GetBlockSize():
                    # Set the default block size to 1
self.FvInfFile.writelines("EFI_BLOCK_SIZE = 0x1" + T_CHAR_LF)
for BlockSize in self.BlockSizeList :
if BlockSize[0] != None:
self.FvInfFile.writelines("EFI_BLOCK_SIZE = " + \
'0x%X' %BlockSize[0] + \
T_CHAR_LF)
if BlockSize[1] != None:
self.FvInfFile.writelines("EFI_NUM_BLOCKS = " + \
' 0x%X' %BlockSize[1] + \
T_CHAR_LF)
        if self.BsBaseAddress is not None:
            self.FvInfFile.writelines('EFI_BOOT_DRIVER_BASE_ADDRESS = ' + \
                                      '0x%X' % self.BsBaseAddress + \
                                      T_CHAR_LF)
        if self.RtBaseAddress is not None:
            self.FvInfFile.writelines('EFI_RUNTIME_DRIVER_BASE_ADDRESS = ' + \
                                      '0x%X' % self.RtBaseAddress + \
                                      T_CHAR_LF)
#
# Add attribute
#
self.FvInfFile.writelines("[attributes]" + T_CHAR_LF)
self.FvInfFile.writelines("EFI_ERASE_POLARITY = " + \
                                  ' %s' % ErasePolarity + \
T_CHAR_LF)
        if self.FvAttributeDict is not None:
            for FvAttribute in self.FvAttributeDict:
self.FvInfFile.writelines("EFI_" + \
FvAttribute + \
' = ' + \
self.FvAttributeDict[FvAttribute] + \
T_CHAR_LF )
if self.FvAlignment != None:
self.FvInfFile.writelines("EFI_FVB2_ALIGNMENT_" + \
self.FvAlignment.strip() + \
" = TRUE" + \
T_CHAR_LF)
#
# Generate FV extension header file
#
        if not self.FvNameGuid:
if len(self.FvExtEntryType) > 0:
GenFdsGlobalVariable.ErrorLogger("FV Extension Header Entries declared for %s with no FvNameGuid declaration." % (self.UiFvName))
if self.FvNameGuid <> None and self.FvNameGuid <> '':
TotalSize = 16 + 4
Buffer = ''
if self.FvNameString == 'TRUE':
#
# Create EXT entry for FV UI name
# This GUID is used: A67DF1FA-8DE8-4E98-AF09-4BDF2EFFBC7C
#
FvUiLen = len(self.UiFvName)
TotalSize += (FvUiLen + 16 + 4)
Guid = FV_UI_EXT_ENTY_GUID.split('-')
#
# Layout:
# EFI_FIRMWARE_VOLUME_EXT_ENTRY : size 4
# GUID : size 16
# FV UI name
#
Buffer += (pack('HH', (FvUiLen + 16 + 4), 0x0002)
+ pack('=LHHBBBBBBBB', int(Guid[0], 16), int(Guid[1], 16), int(Guid[2], 16),
int(Guid[3][-4:-2], 16), int(Guid[3][-2:], 16), int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16), int(Guid[4][-8:-6], 16), int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16), int(Guid[4][-2:], 16))
+ self.UiFvName)
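                # Worked example (illustrative only): for a UiFvName of
                # 'FVMAIN', FvUiLen is 6, so the header packs a size of
                # 6 + 16 + 4 = 26 (0x001A) and type 0x0002, followed by the
                # 16-byte GUID and the six name bytes.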
for Index in range (0, len(self.FvExtEntryType)):
if self.FvExtEntryType[Index] == 'FILE':
# check if the path is absolute or relative
if os.path.isabs(self.FvExtEntryData[Index]):
FileFullPath = os.path.normpath(self.FvExtEntryData[Index])
else:
FileFullPath = os.path.normpath(os.path.join(GenFdsGlobalVariable.WorkSpaceDir, self.FvExtEntryData[Index]))
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
GenFdsGlobalVariable.ErrorLogger("Error opening FV Extension Header Entry file %s." % (self.FvExtEntryData[Index]))
FvExtFile = open (FileFullPath,'rb')
FvExtFile.seek(0,2)
Size = FvExtFile.tell()
if Size >= 0x10000:
GenFdsGlobalVariable.ErrorLogger("The size of FV Extension Header Entry file %s exceeds 0x10000." % (self.FvExtEntryData[Index]))
TotalSize += (Size + 4)
FvExtFile.seek(0)
Buffer += pack('HH', (Size + 4), int(self.FvExtEntryTypeValue[Index], 16))
Buffer += FvExtFile.read()
FvExtFile.close()
if self.FvExtEntryType[Index] == 'DATA':
ByteList = self.FvExtEntryData[Index].split(',')
Size = len (ByteList)
if Size >= 0x10000:
GenFdsGlobalVariable.ErrorLogger("The size of FV Extension Header Entry data %s exceeds 0x10000." % (self.FvExtEntryData[Index]))
TotalSize += (Size + 4)
Buffer += pack('HH', (Size + 4), int(self.FvExtEntryTypeValue[Index], 16))
for Index1 in range (0, Size):
Buffer += pack('B', int(ByteList[Index1], 16))
Guid = self.FvNameGuid.split('-')
Buffer = pack('=LHHBBBBBBBBL',
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16),
TotalSize
) + Buffer
#
# Generate FV extension header file if the total size is not zero
#
if TotalSize > 0:
FvExtHeaderFileName = os.path.join(GenFdsGlobalVariable.FvDir, self.UiFvName + '.ext')
FvExtHeaderFile = StringIO.StringIO()
FvExtHeaderFile.write(Buffer)
Changed = SaveFileOnChange(FvExtHeaderFileName, FvExtHeaderFile.getvalue(), True)
FvExtHeaderFile.close()
if Changed:
if os.path.exists (self.InfFileName):
os.remove (self.InfFileName)
self.FvInfFile.writelines("EFI_FV_EXT_HEADER_FILE_NAME = " + \
FvExtHeaderFileName + \
T_CHAR_LF)
#
# Add [Files]
#
self.FvInfFile.writelines("[files]" + T_CHAR_LF)
if VtfDict != None and self.UiFvName in VtfDict.keys():
self.FvInfFile.writelines("EFI_FILE_NAME = " + \
VtfDict.get(self.UiFvName) + \
T_CHAR_LF)
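# A minimal, self-contained sketch (assumed demo, not part of the build tool)
# of the little-endian GUID packing used above: a registry-format GUID string
# is split on '-' and packed as one 32-bit, two 16-bit and eight 8-bit fields.
if __name__ == '__main__':
    from struct import pack
    DemoGuid = 'A67DF1FA-8DE8-4E98-AF09-4BDF2EFFBC7C'.split('-')
    RawBytes = pack('=LHHBBBBBBBB',
                    int(DemoGuid[0], 16), int(DemoGuid[1], 16),
                    int(DemoGuid[2], 16),
                    int(DemoGuid[3][-4:-2], 16), int(DemoGuid[3][-2:], 16),
                    int(DemoGuid[4][-12:-10], 16), int(DemoGuid[4][-10:-8], 16),
                    int(DemoGuid[4][-8:-6], 16), int(DemoGuid[4][-6:-4], 16),
                    int(DemoGuid[4][-4:-2], 16), int(DemoGuid[4][-2:], 16))
    print ' '.join('%02X' % ord(Byte) for Byte in RawBytes)  # 16 bytes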
| |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface."""
import argparse
import collections
import itertools
import logging
import os
import signal
import sys
import threading
import unittest
from devil import base_error
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper
from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
def AddCommonOptions(parser):
"""Adds all common options to |parser|."""
group = parser.add_argument_group('Common Options')
default_build_type = os.environ.get('BUILDTYPE', 'Debug')
debug_or_release_group = group.add_mutually_exclusive_group()
debug_or_release_group.add_argument(
'--debug', action='store_const', const='Debug', dest='build_type',
default=default_build_type,
help=('If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug.'))
debug_or_release_group.add_argument(
'--release', action='store_const', const='Release', dest='build_type',
help=('If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.'))
group.add_argument('--build-directory', dest='build_directory',
help=('Path to the directory in which build files are'
' located (should not include build type)'))
group.add_argument('--output-directory', dest='output_directory',
help=('Path to the directory in which build files are'
' located (must include build type). This will take'
' precedence over --debug, --release and'
' --build-directory'))
group.add_argument('--num_retries', '--num-retries', dest='num_retries',
type=int, default=2,
help=('Number of retries for a test before '
'giving up (default: %(default)s).'))
group.add_argument('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
group.add_argument('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
group.add_argument('--enable-platform-mode', action='store_true',
help=('Run the test scripts in platform mode, which '
'conceptually separates the test runner from the '
'"device" (local or remote, real or emulated) on '
'which the tests are running. [experimental]'))
group.add_argument('-e', '--environment', default='local',
choices=constants.VALID_ENVIRONMENTS,
help='Test environment to run in (default: %(default)s).')
group.add_argument('--adb-path',
help=('Specify the absolute path of the adb binary that '
'should be used.'))
group.add_argument('--json-results-file', dest='json_results_file',
help='If set, will dump results in JSON form '
'to specified file.')
def ProcessCommonOptions(args):
"""Processes and handles all common options."""
run_tests_helper.SetLogLevel(args.verbose_count)
constants.SetBuildType(args.build_type)
if args.build_directory:
constants.SetBuildDirectory(args.build_directory)
if args.output_directory:
constants.SetOutputDirectory(args.output_directory)
if args.adb_path:
constants.SetAdbPath(args.adb_path)
# Some things such as Forwarder require ADB to be in the environment path.
adb_dir = os.path.dirname(constants.GetAdbPath())
if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
def AddRemoteDeviceOptions(parser):
group = parser.add_argument_group('Remote Device Options')
group.add_argument('--trigger',
help=('Only triggers the test if set. Stores test_run_id '
'in given file path. '))
group.add_argument('--collect',
help=('Only collects the test results if set. '
'Gets test_run_id from given file path.'))
group.add_argument('--remote-device', action='append',
help='Device type to run test on.')
group.add_argument('--results-path',
help='File path to download results to.')
group.add_argument('--api-protocol',
help='HTTP protocol to use. (http or https)')
group.add_argument('--api-address',
help='Address to send HTTP requests.')
group.add_argument('--api-port',
help='Port to send HTTP requests to.')
group.add_argument('--runner-type',
help='Type of test to run as.')
group.add_argument('--runner-package',
help='Package name of test.')
group.add_argument('--device-type',
choices=constants.VALID_DEVICE_TYPES,
                     help=('Type of device to run on (iOS or Android).'))
group.add_argument('--device-oem', action='append',
help='Device OEM to run on.')
group.add_argument('--remote-device-file',
help=('File with JSON to select remote device. '
'Overrides all other flags.'))
group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote device.')
group.add_argument('--network-config', type=int,
help='Integer that specifies the network environment '
'that the tests will be run in.')
group.add_argument('--test-timeout', type=int,
help='Test run timeout in seconds.')
device_os_group = group.add_mutually_exclusive_group()
device_os_group.add_argument('--remote-device-minimum-os',
help='Minimum OS on device.')
device_os_group.add_argument('--remote-device-os', action='append',
help='OS to have on the device.')
api_secret_group = group.add_mutually_exclusive_group()
api_secret_group.add_argument('--api-secret', default='',
help='API secret for remote devices.')
api_secret_group.add_argument('--api-secret-file', default='',
help='Path to file that contains API secret.')
api_key_group = group.add_mutually_exclusive_group()
api_key_group.add_argument('--api-key', default='',
help='API key for remote devices.')
api_key_group.add_argument('--api-key-file', default='',
help='Path to file that contains API key.')
def AddDeviceOptions(parser):
"""Adds device options to |parser|."""
group = parser.add_argument_group(title='Device Options')
group.add_argument('--tool',
dest='tool',
help=('Run the test under a tool '
'(use --tool help to list them)'))
group.add_argument('-d', '--device', dest='test_device',
help=('Target device for the test suite '
'to run on.'))
group.add_argument('--blacklist-file', help='Device blacklist file.')
group.add_argument('--enable-device-cache', action='store_true',
help='Cache device state to disk between runs')
group.add_argument('--incremental-install', action='store_true',
help='Use an _incremental apk.')
group.add_argument('--enable-concurrent-adb', action='store_true',
help='Run multiple adb commands at the same time, even '
'for the same device.')
def AddGTestOptions(parser):
"""Adds gtest options to |parser|."""
group = parser.add_argument_group('GTest Options')
group.add_argument('-s', '--suite', dest='suite_name',
nargs='+', metavar='SUITE_NAME', required=True,
help='Executable name of the test suite to run.')
group.add_argument('--gtest_also_run_disabled_tests',
'--gtest-also-run-disabled-tests',
dest='run_disabled', action='store_true',
help='Also run disabled tests if applicable.')
group.add_argument('-a', '--test-arguments', dest='test_arguments',
default='',
help='Additional arguments to pass to the test.')
group.add_argument('-t', '--shard-timeout',
dest='shard_timeout', type=int, default=60,
help='Timeout to wait for each test '
'(default: %(default)s).')
group.add_argument('--isolate_file_path',
'--isolate-file-path',
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
group.add_argument('--app-data-file', action='append', dest='app_data_files',
help='A file path relative to the app data directory '
'that should be saved to the host.')
group.add_argument('--app-data-file-dir',
help='Host directory to which app data files will be'
' saved. Used with --app-data-file.')
group.add_argument('--delete-stale-data', dest='delete_stale_data',
action='store_true',
help='Delete stale test data on the device.')
group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
dest='repeat', type=int, default=0,
help='Number of times to repeat the specified set of '
'tests.')
filter_group = group.add_mutually_exclusive_group()
filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
dest='test_filter',
help='googletest-style filter string.')
filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
help='Path to file that contains googletest-style '
'filter strings. (Lines will be joined with '
'":" to create a single filter string.)')
AddDeviceOptions(parser)
AddCommonOptions(parser)
AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
group = parser.add_argument_group('Linker Test Options')
group.add_argument('-f', '--gtest-filter', dest='test_filter',
help='googletest-style filter string.')
AddCommonOptions(parser)
AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
"""Adds the Java test options to |option_parser|."""
argument_group.add_argument(
'-f', '--test-filter', dest='test_filter',
help=('Test filter (if not fully qualified, will run all matches).'))
argument_group.add_argument(
'--repeat', dest='repeat', type=int, default=0,
help='Number of times to repeat the specified set of tests.')
argument_group.add_argument(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
argument_group.add_argument(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
argument_group.add_argument(
'--screenshot', dest='screenshot_failures', action='store_true',
help='Capture screenshots of test failures')
argument_group.add_argument(
'--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
argument_group.add_argument(
'--official-build', action='store_true', help='Run official build tests.')
argument_group.add_argument(
'--test_data', '--test-data', action='append', default=[],
help=('Each instance defines a directory of test data that should be '
'copied to the target(s) before running the tests. The argument '
'should be of the form <target>:<source>, <target> is relative to '
'the device data directory, and <source> is relative to the '
'chromium build directory.'))
argument_group.add_argument(
'--disable-dalvik-asserts', dest='set_asserts', action='store_false',
default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
"""Processes options/arguments and populates |options| with defaults."""
# TODO(jbudorick): Handle most of this function in argparse.
if args.annotation_str:
args.annotations = args.annotation_str.split(',')
elif args.test_filter:
args.annotations = []
else:
args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
'EnormousTest', 'IntegrationTest']
if args.exclude_annotation_str:
args.exclude_annotations = args.exclude_annotation_str.split(',')
else:
args.exclude_annotations = []
def AddInstrumentationTestOptions(parser):
"""Adds Instrumentation test options to |parser|."""
parser.usage = '%(prog)s [options]'
group = parser.add_argument_group('Instrumentation Test Options')
AddJavaTestOptions(group)
java_or_python_group = group.add_mutually_exclusive_group()
java_or_python_group.add_argument(
'-j', '--java-only', action='store_false',
dest='run_python_tests', default=True, help='Run only the Java tests.')
java_or_python_group.add_argument(
'-p', '--python-only', action='store_false',
dest='run_java_tests', default=True,
help='Run only the host-driven tests.')
group.add_argument('--host-driven-root',
help='Root of the host-driven tests.')
group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
group.add_argument('--apk-under-test', dest='apk_under_test',
help=('the name of the apk under test.'))
group.add_argument('--test-apk', dest='test_apk', required=True,
help=('The name of the apk containing the tests '
'(without the .apk extension; '
'e.g. "ContentShellTest").'))
group.add_argument('--additional-apk', action='append',
dest='additional_apks', default=[],
help='Additional apk that must be installed on '
'the device when the tests are run')
group.add_argument('--coverage-dir',
help=('Directory in which to place all generated '
'EMMA coverage files.'))
group.add_argument('--device-flags', dest='device_flags', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--device-flags-file', default='',
help='The relative filepath to a file containing '
'command-line flags to set on the device')
group.add_argument('--isolate_file_path',
'--isolate-file-path',
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
group.add_argument('--delete-stale-data', dest='delete_stale_data',
action='store_true',
help='Delete stale test data on the device.')
AddCommonOptions(parser)
AddDeviceOptions(parser)
AddRemoteDeviceOptions(parser)
def ProcessInstrumentationOptions(args):
"""Processes options/arguments and populate |options| with defaults.
Args:
args: argparse.Namespace object.
Returns:
An InstrumentationOptions named tuple which contains all options relevant to
instrumentation tests.
"""
ProcessJavaTestOptions(args)
if not args.host_driven_root:
args.run_python_tests = False
if os.path.exists(args.test_apk):
args.test_apk_path = args.test_apk
args.test_apk, _ = os.path.splitext(os.path.basename(args.test_apk))
else:
args.test_apk_path = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % args.test_apk)
args.test_apk_jar_path = os.path.join(
constants.GetOutDirectory(),
constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % args.test_apk)
args.test_support_apk_path = '%sSupport%s' % (
os.path.splitext(args.test_apk_path))
args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)
# TODO(jbudorick): Get rid of InstrumentationOptions.
return instrumentation_test_options.InstrumentationOptions(
args.tool,
args.annotations,
args.exclude_annotations,
args.test_filter,
args.test_data,
args.save_perf_json,
args.screenshot_failures,
args.wait_for_debugger,
args.coverage_dir,
args.test_apk,
args.test_apk_path,
args.test_apk_jar_path,
args.test_runner,
args.test_support_apk_path,
args.device_flags,
args.isolate_file_path,
args.set_asserts,
args.delete_stale_data
)
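# Worked example (paths are illustrative, assuming the default output layout):
# with --test-apk ContentShellTest and an output directory of out/Debug, the
# derivations above yield
#   test_apk_path:         out/Debug/apks/ContentShellTest.apk
#   test_support_apk_path: out/Debug/apks/ContentShellTestSupport.apk
# since '%sSupport%s' re-joins the os.path.splitext() halves around 'Support'.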
def AddUIAutomatorTestOptions(parser):
"""Adds UI Automator test options to |parser|."""
group = parser.add_argument_group('UIAutomator Test Options')
AddJavaTestOptions(group)
group.add_argument(
'--package', required=True, choices=constants.PACKAGE_INFO.keys(),
metavar='PACKAGE', help='Package under test.')
group.add_argument(
'--test-jar', dest='test_jar', required=True,
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path '
'to the jar.'))
AddCommonOptions(parser)
AddDeviceOptions(parser)
def AddJUnitTestOptions(parser):
"""Adds junit test options to |parser|."""
group = parser.add_argument_group('JUnit Test Options')
group.add_argument(
'-s', '--test-suite', dest='test_suite', required=True,
help=('JUnit test suite to run.'))
group.add_argument(
'-f', '--test-filter', dest='test_filter',
help='Filters tests googletest-style.')
group.add_argument(
'--package-filter', dest='package_filter',
help='Filters tests by package.')
group.add_argument(
'--runner-filter', dest='runner_filter',
help='Filters tests by runner class. Must be fully qualified.')
group.add_argument(
'--sdk-version', dest='sdk_version', type=int,
help='The Android SDK version.')
AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
"""Adds monkey test options to |parser|."""
group = parser.add_argument_group('Monkey Test Options')
group.add_argument(
'--package', required=True, choices=constants.PACKAGE_INFO.keys(),
metavar='PACKAGE', help='Package under test.')
group.add_argument(
'--event-count', default=10000, type=int,
help='Number of events to generate (default: %(default)s).')
group.add_argument(
'--category', default='',
help='A list of allowed categories.')
group.add_argument(
'--throttle', default=100, type=int,
help='Delay between events (ms) (default: %(default)s). ')
group.add_argument(
'--seed', type=int,
help=('Seed value for pseudo-random generator. Same seed value generates '
'the same sequence of events. Seed is randomized by default.'))
group.add_argument(
'--extra-args', default='',
help=('String of other args to pass to the command verbatim.'))
AddCommonOptions(parser)
AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
"""Processes all monkey test options.
Args:
args: argparse.Namespace object.
Returns:
A MonkeyOptions named tuple which contains all options relevant to
monkey tests.
"""
# TODO(jbudorick): Handle this directly in argparse with nargs='+'
category = args.category
if category:
category = args.category.split(',')
# TODO(jbudorick): Get rid of MonkeyOptions.
return monkey_test_options.MonkeyOptions(
args.verbose_count,
args.package,
args.event_count,
category,
args.throttle,
args.seed,
args.extra_args)
def AddUirobotTestOptions(parser):
"""Adds uirobot test options to |option_parser|."""
group = parser.add_argument_group('Uirobot Test Options')
group.add_argument('--app-under-test', required=True,
help='APK to run tests on.')
group.add_argument(
'--repeat', dest='repeat', type=int, default=0,
help='Number of times to repeat the uirobot test.')
group.add_argument(
'--minutes', default=5, type=int,
help='Number of minutes to run uirobot test [default: %(default)s].')
AddCommonOptions(parser)
AddDeviceOptions(parser)
AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
"""Adds perf test options to |parser|."""
group = parser.add_argument_group('Perf Test Options')
class SingleStepAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values and not namespace.single_step:
parser.error('single step command provided, '
'but --single-step not specified.')
elif namespace.single_step and not values:
parser.error('--single-step specified, '
'but no single step command provided.')
setattr(namespace, self.dest, values)
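  # Hypothetical invocations (command names are examples only) showing the
  # pairing that SingleStepAction enforces between --single-step and the
  # trailing positional command:
  #   test_runner.py perf --single-step -- ./run_foo   # accepted
  #   test_runner.py perf -- ./run_foo                 # error: --single-step
  #                                                    #        not specified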
step_group = group.add_mutually_exclusive_group(required=True)
# TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
# This requires removing "--" from client calls.
step_group.add_argument(
'--single-step', action='store_true',
help='Execute the given command with retries, but only print the result '
'for the "most successful" round.')
step_group.add_argument(
'--steps',
help='JSON file containing the list of commands to run.')
step_group.add_argument(
'--print-step',
help='The name of a previously executed perf step to print.')
group.add_argument(
'--output-json-list',
help='Write a simple list of names from --steps into the given file.')
group.add_argument(
'--collect-chartjson-data',
action='store_true',
help='Cache the chartjson output from each step for later use.')
group.add_argument(
'--output-chartjson-data',
default='',
help='Write out chartjson into the given file.')
group.add_argument(
'--get-output-dir-archive', metavar='FILENAME',
      help='Write the cached output directory archived by a step into the'
' given ZIP file.')
group.add_argument(
'--flaky-steps',
help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
group.add_argument(
'--no-timeout', action='store_true',
help=('Do not impose a timeout. Each perf step is responsible for '
'implementing the timeout logic.'))
group.add_argument(
'-f', '--test-filter',
help=('Test filter (will match against the names listed in --steps).'))
group.add_argument(
'--dry-run', action='store_true',
help='Just print the steps without executing.')
# Uses 0.1 degrees C because that's what Android does.
group.add_argument(
'--max-battery-temp', type=int,
help='Only start tests when the battery is at or below the given '
'temperature (0.1 C)')
group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
help='If --single-step is specified, the command to run.')
group.add_argument('--min-battery-level', type=int,
                     help='Only starts tests when the battery is charged '
                          'above the given level.')
AddCommonOptions(parser)
AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
"""Processes all perf test options.
Args:
args: argparse.Namespace object.
Returns:
A PerfOptions named tuple which contains all options relevant to
perf tests.
"""
# TODO(jbudorick): Move single_step handling down into the perf tests.
if args.single_step:
args.single_step = ' '.join(args.single_step_command)
# TODO(jbudorick): Get rid of PerfOptions.
return perf_test_options.PerfOptions(
args.steps, args.flaky_steps, args.output_json_list,
args.print_step, args.no_timeout, args.test_filter,
args.dry_run, args.single_step, args.collect_chartjson_data,
args.output_chartjson_data, args.get_output_dir_archive,
args.max_battery_temp, args.min_battery_level)
def AddPythonTestOptions(parser):
group = parser.add_argument_group('Python Test Options')
group.add_argument(
'-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
help='Name of the test suite to run.')
AddCommonOptions(parser)
def _RunLinkerTests(args, devices):
"""Subcommand of RunTestsCommands which runs linker tests."""
runner_factory, tests = linker_setup.Setup(args, devices)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=True, test_timeout=60,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Linker test',
test_package='ChromiumLinkerTest')
if args.json_results_file:
json_results.GenerateJsonResultsFile([results], args.json_results_file)
return exit_code
def _RunInstrumentationTests(args, devices):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))
instrumentation_options = ProcessInstrumentationOptions(args)
if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device.')
devices = devices[:1]
results = base_test_result.TestRunResults()
exit_code = 0
if args.run_java_tests:
java_runner_factory, java_tests = instrumentation_setup.Setup(
instrumentation_options, devices)
else:
java_runner_factory = None
java_tests = None
if args.run_python_tests:
py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
args.host_driven_root, args.official_build,
instrumentation_options)
else:
py_runner_factory = None
py_tests = None
results = []
repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
else itertools.count())
for _ in repetitions:
iteration_results = base_test_result.TestRunResults()
if java_tests:
test_results, test_exit_code = test_dispatcher.RunTests(
java_tests, java_runner_factory, devices, shard=True,
test_timeout=None, num_retries=args.num_retries)
iteration_results.AddTestRunResults(test_results)
# Only allow exit code escalation
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
if py_tests:
test_results, test_exit_code = test_dispatcher.RunTests(
py_tests, py_runner_factory, devices, shard=True, test_timeout=None,
num_retries=args.num_retries)
iteration_results.AddTestRunResults(test_results)
# Only allow exit code escalation
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
results.append(iteration_results)
report_results.LogFull(
results=iteration_results,
test_type='Instrumentation',
test_package=os.path.basename(args.test_apk),
annotation=args.annotations,
flakiness_server=args.flakiness_dashboard_server)
if args.json_results_file:
json_results.GenerateJsonResultsFile(results, args.json_results_file)
return exit_code
def _RunJUnitTests(args):
"""Subcommand of RunTestsCommand which runs junit tests."""
runner_factory, tests = junit_setup.Setup(args)
results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
report_results.LogFull(
results=results,
test_type='JUnit',
test_package=args.test_suite)
if args.json_results_file:
json_results.GenerateJsonResultsFile([results], args.json_results_file)
return exit_code
def _RunMonkeyTests(args, devices):
"""Subcommand of RunTestsCommands which runs monkey tests."""
monkey_options = ProcessMonkeyTestOptions(args)
runner_factory, tests = monkey_setup.Setup(monkey_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=False, test_timeout=None,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Monkey',
test_package='Monkey')
if args.json_results_file:
json_results.GenerateJsonResultsFile([results], args.json_results_file)
return exit_code
def _RunPerfTests(args, active_devices):
"""Subcommand of RunTestsCommands which runs perf tests."""
perf_options = ProcessPerfTestOptions(args)
# Just save a simple json with a list of test names.
if perf_options.output_json_list:
return perf_test_runner.OutputJsonList(
perf_options.steps, perf_options.output_json_list)
# Just print the results from a single previously executed step.
if perf_options.print_step:
return perf_test_runner.PrintTestOutput(
perf_options.print_step, perf_options.output_chartjson_data,
perf_options.get_output_dir_archive)
runner_factory, tests, devices = perf_setup.Setup(
perf_options, active_devices)
# shard=False means that each device will get the full list of tests
# and then each one will decide their own affinity.
# shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
results, _ = test_dispatcher.RunTests(
tests, runner_factory, devices, shard=False, test_timeout=None,
num_retries=args.num_retries)
report_results.LogFull(
results=results,
test_type='Perf',
test_package='Perf')
if args.json_results_file:
json_results.GenerateJsonResultsFile([results], args.json_results_file)
if perf_options.single_step:
return perf_test_runner.PrintTestOutput('single_step')
perf_test_runner.PrintSummary(tests)
# Always return 0 on the sharding stage. Individual tests exit_code
# will be returned on the print_step stage.
return 0
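# Illustration of the dispatch modes described above (hedged, illustrative):
# with tests [A, B, C] and two devices, shard=False hands the full [A, B, C]
# list to every device and lets each decide its own affinity, while
# shard=True has each device pop the next pending test from a shared queue
# (device 1 takes A, device 2 takes B, and whichever finishes first takes C).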
def _RunPythonTests(args):
"""Subcommand of RunTestsCommand which runs python unit tests."""
suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
suite_path = suite_vars['path']
suite_test_modules = suite_vars['test_modules']
sys.path = [suite_path] + sys.path
try:
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
for m in suite_test_modules)
runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
return 0 if runner.run(suite).wasSuccessful() else 1
finally:
sys.path = sys.path[1:]
def _GetAttachedDevices(blacklist_file, test_device, enable_cache):
"""Get all attached devices.
Args:
blacklist_file: Path to device blacklist.
test_device: Name of a specific device to use.
enable_cache: Whether to enable checksum caching.
Returns:
A list of attached devices.
"""
blacklist = (device_blacklist.Blacklist(blacklist_file)
if blacklist_file
else None)
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist, enable_device_files_cache=enable_cache)
if test_device:
test_device = [d for d in attached_devices if d == test_device]
if not test_device:
raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
% (test_device, ', '.join(attached_devices)))
return test_device
else:
if not attached_devices:
raise device_errors.NoDevicesError()
return sorted(attached_devices)
def RunTestsCommand(args, parser): # pylint: disable=too-many-return-statements
"""Checks test type and dispatches to the appropriate function.
Args:
args: argparse.Namespace object.
parser: argparse.ArgumentParser object.
Returns:
    Integer indicating the exit code.
Raises:
Exception: Unknown command name passed in, or an exception from an
individual test runner.
"""
command = args.command
ProcessCommonOptions(args)
if args.enable_platform_mode:
return RunTestsInPlatformMode(args, parser)
forwarder.Forwarder.RemoveHostLog()
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
def get_devices():
return _GetAttachedDevices(args.blacklist_file, args.test_device,
args.enable_device_cache)
if command == 'gtest':
return RunTestsInPlatformMode(args, parser)
elif command == 'linker':
return _RunLinkerTests(args, get_devices())
elif command == 'instrumentation':
return _RunInstrumentationTests(args, get_devices())
elif command == 'junit':
return _RunJUnitTests(args)
elif command == 'monkey':
return _RunMonkeyTests(args, get_devices())
elif command == 'perf':
return _RunPerfTests(args, get_devices())
elif command == 'python':
return _RunPythonTests(args)
else:
raise Exception('Unknown test type.')
_SUPPORTED_IN_PLATFORM_MODE = [
# TODO(jbudorick): Add support for more test types.
'gtest',
'instrumentation',
'uirobot',
]
def RunTestsInPlatformMode(args, parser):
def infra_error(message):
parser.exit(status=constants.INFRA_EXIT_CODE, message=message)
if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
infra_error('%s is not yet supported in platform mode' % args.command)
with environment_factory.CreateEnvironment(args, infra_error) as env:
with test_instance_factory.CreateTestInstance(args, infra_error) as test:
with test_run_factory.CreateTestRun(
args, env, test, infra_error) as test_run:
results = []
repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
else itertools.count())
for _ in repetitions:
iteration_results = test_run.RunTests()
if iteration_results is not None:
results.append(iteration_results)
report_results.LogFull(
results=iteration_results,
test_type=test.TestType(),
test_package=test_run.TestPackage(),
annotation=getattr(args, 'annotations', None),
flakiness_server=getattr(args, 'flakiness_dashboard_server',
None))
if args.json_results_file:
json_results.GenerateJsonResultsFile(
results, args.json_results_file)
return (0 if all(r.DidRunPass() for r in results)
else constants.ERROR_EXIT_CODE)
CommandConfigTuple = collections.namedtuple(
'CommandConfigTuple',
['add_options_func', 'help_txt'])
VALID_COMMANDS = {
'gtest': CommandConfigTuple(
AddGTestOptions,
'googletest-based C++ tests'),
'instrumentation': CommandConfigTuple(
AddInstrumentationTestOptions,
'InstrumentationTestCase-based Java tests'),
'junit': CommandConfigTuple(
AddJUnitTestOptions,
'JUnit4-based Java tests'),
'monkey': CommandConfigTuple(
AddMonkeyTestOptions,
"Tests based on Android's monkey"),
'perf': CommandConfigTuple(
AddPerfTestOptions,
'Performance tests'),
'python': CommandConfigTuple(
AddPythonTestOptions,
'Python tests based on unittest.TestCase'),
'linker': CommandConfigTuple(
AddLinkerTestOptions,
'Linker tests'),
'uirobot': CommandConfigTuple(
AddUirobotTestOptions,
'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
for thread in threading.enumerate():
reraiser_thread.LogThreadStack(thread)
def main():
signal.signal(signal.SIGUSR1, DumpThreadStacks)
parser = argparse.ArgumentParser()
command_parsers = parser.add_subparsers(title='test types',
dest='command')
for test_type, config in sorted(VALID_COMMANDS.iteritems(),
key=lambda x: x[0]):
subparser = command_parsers.add_parser(
test_type, usage='%(prog)s [options]', help=config.help_txt)
config.add_options_func(subparser)
args = parser.parse_args()
try:
return RunTestsCommand(args, parser)
except base_error.BaseError as e:
logging.exception('Error occurred.')
if e.is_infra_error:
return constants.INFRA_EXIT_CODE
return constants.ERROR_EXIT_CODE
except: # pylint: disable=W0702
logging.exception('Unrecognized error occurred.')
return constants.ERROR_EXIT_CODE
if __name__ == '__main__':
sys.exit(main())
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import logging
import os
import xml.dom.minidom as minidom
from xml.parsers.expat import ExpatError
import crash_utils
from repository_parser_interface import ParserInterface
class GitParser(ParserInterface):
"""Parser for Git repository in googlesource.
Attributes:
parsed_deps: A map from component path to its repository name, regression,
etc.
    url_parts_map: A map from URL type to its URL parts. These parts are
      appended to the base URL to form different URLs.
"""
def __init__(self, parsed_deps, url_parts_map):
self.component_to_url_map = parsed_deps
self.url_parts_map = url_parts_map
def ParseChangelog(self, component_path, range_start, range_end):
file_to_revision_map = {}
revision_map = {}
base_url = self.component_to_url_map[component_path]['repository']
changelog_url = base_url + self.url_parts_map['changelog_url']
revision_url = base_url + self.url_parts_map['revision_url']
    # Retrieve data from the URL; return empty maps on failure. The HTML URL
    # is a URL from which the changelog can be parsed as HTML.
url = changelog_url % (range_start, range_end)
html_url = url + '?pretty=fuller'
response = crash_utils.GetDataFromURL(html_url)
if not response:
logging.error('Failed to retrieve changelog from %s', html_url)
return (revision_map, file_to_revision_map)
    # Parse XML out of the returned string. If it fails, return empty maps.
try:
dom = minidom.parseString(response)
except ExpatError:
logging.error('Failed to parse changelog from %s', url)
return (revision_map, file_to_revision_map)
    # The revision information is in the divs from the third to the
    # second-to-last one.
divs = dom.getElementsByTagName('div')[2:-1]
pres = dom.getElementsByTagName('pre')
uls = dom.getElementsByTagName('ul')
# Divs, pres and uls each contain revision information for one CL, so
    # they should have the same length.
if not divs or len(divs) != len(pres) or len(pres) != len(uls):
self.ParseChangelogFromJSON(range_start, range_end, changelog_url,
revision_url, revision_map,
file_to_revision_map)
return (revision_map, file_to_revision_map)
# Iterate through divs and parse revisions
for (div, pre, ul) in zip(divs, pres, uls):
# Create new revision object for each revision.
revision = {}
# There must be three <tr>s. If not, this page is wrong.
trs = div.getElementsByTagName('tr')
if len(trs) != 3:
continue
# Retrieve git hash.
githash = trs[0].getElementsByTagName('a')[0].firstChild.nodeValue
# Retrieve and set author.
author = trs[1].getElementsByTagName(
'td')[0].firstChild.nodeValue.split('<')[0]
revision['author'] = author
      # Retrieve and set message.
revision['message'] = pre.firstChild.nodeValue
# Set url of this CL.
revision_url_part = self.url_parts_map['revision_url'] % githash
revision['url'] = base_url + revision_url_part
# Go through changed files, they are in li.
lis = ul.getElementsByTagName('li')
for li in lis:
# Retrieve path and action of the changed file
file_path = li.getElementsByTagName('a')[0].firstChild.nodeValue
file_action = li.getElementsByTagName('span')[0].getAttribute('class')
        # Normalize file action so that it is the same as the SVN parser's.
if file_action == 'add':
file_action = 'A'
elif file_action == 'delete':
file_action = 'D'
elif file_action == 'modify':
file_action = 'M'
# Add the changed file to the map.
changed_file = os.path.basename(file_path)
if changed_file not in file_to_revision_map:
file_to_revision_map[changed_file] = []
file_to_revision_map[changed_file].append((githash, file_action,
file_path))
# Add this revision object to the map.
revision_map[githash] = revision
# Parse one revision for the start range, because googlesource does not
# include the start of the range.
self.ParseRevision(revision_url, range_start, revision_map,
file_to_revision_map)
return (revision_map, file_to_revision_map)
def ParseChangelogFromJSON(self, range_start, range_end, changelog_url,
revision_url, revision_map, file_to_revision_map):
"""Parses changelog by going over the JSON file.
Args:
range_start: Starting range of the regression.
range_end: Ending range of the regression.
changelog_url: The url to retrieve changelog from.
revision_url: The url to retrieve individual revision from.
revision_map: A map from a git hash number to its revision information.
file_to_revision_map: A map from file to a git hash in which it occurs.
"""
# Compute URLs from given range, and retrieves changelog. Stop if it fails.
changelog_url %= (range_start, range_end)
json_url = changelog_url + '?format=json'
response = crash_utils.GetDataFromURL(json_url)
if not response:
logging.error('Failed to retrieve changelog from %s.', json_url)
return
    # Parse changelog from the returned object. The returned string starts
    # with the anti-XSSI prefix ")]}'\n", so skip the first five characters.
revisions = crash_utils.LoadJSON(response[5:])
if not revisions:
logging.error('Failed to parse changelog from %s.', json_url)
return
# Parse individual revision in the log.
for revision in revisions['log']:
githash = revision['commit']
self.ParseRevision(revision_url, githash, revision_map,
file_to_revision_map)
# Parse the revision with range_start, because googlesource ignores
# that one.
self.ParseRevision(revision_url, range_start, revision_map,
file_to_revision_map)
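  # A stdlib-only sketch (assuming crash_utils.LoadJSON behaves like
  # json.loads) of the prefix handling above: gitiles prepends a five-byte
  # anti-XSSI prefix to its JSON responses, which must be dropped first.
  #
  #   import json
  #   def load_gitiles_json(response):
  #       return json.loads(response[5:])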
def ParseRevision(self, revision_url, githash, revision_map,
file_to_revision_map):
# Retrieve data from the URL, return if it fails.
url = revision_url % githash
response = crash_utils.GetDataFromURL(url + '?format=json')
if not response:
logging.warning('Failed to retrieve revision from %s.', url)
return
# Load JSON object from the string. If it fails, terminate the function.
json_revision = crash_utils.LoadJSON(response[5:])
if not json_revision:
logging.warning('Failed to parse revision from %s.', url)
return
    # Create a map representing the revision and get the githash from the
    # JSON object.
revision = {}
githash = json_revision['commit']
# Set author, message and URL of this CL.
revision['author'] = json_revision['author']['name']
revision['message'] = json_revision['message']
revision['url'] = url
# Iterate through the changed files.
for diff in json_revision['tree_diff']:
file_path = diff['new_path']
file_action = diff['type']
# Normalize file action so that it fits with svn_repository_parser.
if file_action == 'add':
file_action = 'A'
elif file_action == 'delete':
file_action = 'D'
elif file_action == 'modify':
file_action = 'M'
# Add the file to the map.
changed_file = os.path.basename(file_path)
if changed_file not in file_to_revision_map:
file_to_revision_map[changed_file] = []
file_to_revision_map[changed_file].append(
(githash, file_action, file_path))
# Add this CL to the map.
revision_map[githash] = revision
return
def ParseLineDiff(self, path, component, file_action, githash):
changed_line_numbers = []
changed_line_contents = []
base_url = self.component_to_url_map[component]['repository']
backup_url = (base_url + self.url_parts_map['revision_url']) % githash
# If the file is added (not modified), treat it as if it is not changed.
if file_action == 'A':
return (backup_url, changed_line_numbers, changed_line_contents)
    # Retrieve the diff data from the URL; if it fails, return empty lines.
url = (base_url + self.url_parts_map['diff_url']) % (githash, path)
data = crash_utils.GetDataFromURL(url + '?format=text')
if not data:
logging.error('Failed to get diff from %s.', url)
return (backup_url, changed_line_numbers, changed_line_contents)
# Decode the returned object to line diff info
diff = base64.b64decode(data).splitlines()
    # Iterate through the lines in diff. A current_line of -1 means we are
    # not yet inside a diff chunk.
current_line = -1
for line in diff:
line = line.strip()
# If line starts with @@, a new chunk starts.
if line.startswith('@@'):
current_line = int(line.split('+')[1].split(',')[0])
# If we are in a chunk.
elif current_line != -1:
# If line is either added or modified.
if line.startswith('+'):
changed_line_numbers.append(current_line)
changed_line_contents.append(line[2:])
# Do not increment current line if the change is 'delete'.
if not line.startswith('-'):
current_line += 1
    # Return the URL without the '?format=text' suffix.
return (url, changed_line_numbers, changed_line_contents)
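  # Worked example of the chunk walk above (illustrative): given the hunk
  # header '@@ -10,2 +12,3 @@', current_line starts at 12. An added '+' line
  # is recorded at line 12 and advances the counter to 13; a removed '-' line
  # is recorded nowhere and does not advance it; an unchanged context line
  # only advances it.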
def ParseBlameInfo(self, component, file_path, line, revision):
base_url = self.component_to_url_map[component]['repository']
# Retrieve blame JSON file from googlesource. If it fails, return None.
url_part = self.url_parts_map['blame_url'] % (revision, file_path)
blame_url = base_url + url_part
json_string = crash_utils.GetDataFromURL(blame_url)
if not json_string:
logging.error('Failed to retrieve annotation information from %s.',
blame_url)
return
    # Parse JSON object from the string. The returned string starts with the
    # anti-XSSI prefix ")]}'\n", so skip the first five characters.
annotation = crash_utils.LoadJSON(json_string[5:])
if not annotation:
logging.error('Failed to parse annotation information from %s.',
blame_url)
return
# Go through the regions, which is a list of consecutive lines with same
# author/revision.
for blame_line in annotation['regions']:
start = blame_line['start']
count = blame_line['count']
# For each region, check if the line we want the blame info of is in this
# region.
if start <= line and line <= start + count - 1:
# If we are in the right region, get the information from the line.
revision = blame_line['commit']
author = blame_line['author']['name']
revision_url_parts = self.url_parts_map['revision_url'] % revision
revision_url = base_url + revision_url_parts
# TODO(jeun): Add a way to get content from JSON object.
content = None
return (content, revision, author, revision_url)
    # Return None if no matching region exists.
return None
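# A self-contained sketch (assumed data shape, mirroring the 'regions' JSON
# consumed by ParseBlameInfo above) of the region lookup: a line belongs to a
# region when start <= line <= start + count - 1.
if __name__ == '__main__':
    regions = [
        {'start': 1, 'count': 4, 'commit': 'aaa111',
         'author': {'name': 'alice'}},
        {'start': 5, 'count': 2, 'commit': 'bbb222',
         'author': {'name': 'bob'}},
    ]
    line = 6
    for region in regions:
        if region['start'] <= line <= region['start'] + region['count'] - 1:
            print region['commit'], region['author']['name']  # bbb222 bob
            break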
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
The iPOPO composer agent
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import threading
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Instantiate, Validate, Invalidate
# Pelix
import pelix.ipopo.constants as constants
import pelix.remote
# Composer
import cohorte.composer
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (3, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_AGENT_ISOLATE)
@Requires('_ipopo', constants.IPOPO_SERVICE_SPECIFICATION)
@Instantiate('cohorte-composer-agent-ipopo')
class IPopoAgent(object):
"""
The iPOPO component handler for the isolate composer
"""
def __init__(self):
"""
Sets up members
"""
# Injected services
self._ipopo = None
# Name -> Component
self.__names = {}
# Factory -> set(Instantiated components)
self.__components = {}
# Factory -> set(Remaining components)
self.__remaining = {}
# Thread safety
self.__lock = threading.RLock()
def handle_ipopo_event(self, event):
"""
Handles an iPOPO event
:param event: An iPOPO event
"""
kind = event.get_kind()
factory = event.get_factory_name()
with self.__lock:
if kind == constants.IPopoEvent.REGISTERED:
# New factory registered
try:
# Instantiate waiting components
self.handle(self.__remaining[factory])
except KeyError:
# Unknown factory
pass
elif kind == constants.IPopoEvent.UNREGISTERED:
# Factory gone, put components in remaining state
try:
self.__remaining.setdefault(factory, set()) \
.update(self.__components.pop(factory))
except KeyError:
# No instantiated components for this factory
pass
@Validate
def validate(self, _):
"""
Component validated
"""
# Register to iPOPO events
self._ipopo.add_listener(self)
@Invalidate
def invalidate(self, _):
"""
Component invalidated
"""
# Unregister from iPOPO events
self._ipopo.remove_listener(self)
@staticmethod
def _compute_properties(component):
"""
        Computes the configuration properties of the given component
"""
# Copy existing properties
properties = component.properties.copy()
# TODO: prepares properties (filters...)
# TODO: add position information (name, node, isolate, ...)
# Export the component interfaces
properties.setdefault(pelix.remote.PROP_EXPORTED_INTERFACES, "*")
return properties
def __try_instantiate(self, component):
"""
Tries to instantiate a component
:param component: A component bean
:return: True if the component has been validated, False if its factory
is missing
:raise Exception: Error instantiating the component
"""
try:
# Prepare properties (filters...)
factory = component.factory
properties = self._compute_properties(component)
# Instantiate the component
self._ipopo.instantiate(factory, component.name, properties)
# Component instantiated
try:
remaining = self.__remaining[factory]
remaining.discard(component)
if not remaining:
del self.__remaining[factory]
except KeyError:
# Component wasn't a remaining one
pass
# Store it
self.__components.setdefault(factory, set()).add(component)
return True
except TypeError:
# Missing factory: maybe later
_logger.warning("iPOPO agent: factory missing for %s", component)
return False
def handle(self, components):
"""
Tries to instantiate the given components immediately and stores the
remaining ones to instantiate them as soon as possible
:param components: A set of RawComponent beans
:return: The immediately instantiated components
"""
with self.__lock:
# Beans of the components to instantiate
components = set(components)
instantiated = set()
for component in components:
try:
# Check if component is already running
stored = self.__names[component.name]
if stored in self.__components[component.factory]:
# Already running
_logger.debug("%s is already running...",
component.name)
continue
elif stored in self.__remaining[component.factory]:
# Already in the remaining list, use the stored bean
# -> this will avoid different hashes due to network
# transmission
component = stored
except KeyError:
# Not yet known component
pass
# Store the name
self.__names[component.name] = component
try:
# Try instantiation (updates local storage)
if self.__try_instantiate(component):
instantiated.add(component)
else:
# Factory not found, keep track of the component
self.__remaining.setdefault(component.factory, set()) \
.add(component)
except Exception as ex:
# Other errors
_logger.exception("Error instantiating component %s: %s",
component, ex)
return instantiated
def kill(self, name):
"""
Kills the component with the given name
:param name: Name of the component to kill
:raise KeyError: Unknown component
"""
with self.__lock:
# Get the component bean
component = self.__names.pop(name)
# Bean storage
storage = self.__components
try:
# Kill the component
self._ipopo.kill(name)
except ValueError:
# iPOPO didn't know about the component,
# remove it from the remaining ones
storage = self.__remaining
else:
# Bean is stored in the instantiated components dictionary
storage = self.__components
try:
# Clean up the storage
components = storage[component.factory]
components.remove(component)
if not components:
del storage[component.factory]
except KeyError:
# Strange: the component is not where it is supposed to be
_logger.warning("Component %s is not stored where it is "
"supposed to be (%s components)", name,
"instantiated" if storage is self.__components
else "remaining")
return
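# A hedged sketch (names are examples only) of the bookkeeping pattern used
# by the agent above: components wait in __remaining until their factory
# appears, then move to __components once instantiated.
#
#   remaining = {}
#   remaining.setdefault('demo.factory', set()).add('component-A')
#   # ... the factory registers and the component is instantiated ...
#   instantiated = {}
#   instantiated.setdefault('demo.factory', set()).update(
#       remaining.pop('demo.factory'))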
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
from StringIO import StringIO
from lxml import etree
__all__ = ['FileExistsInProjectError', 'FileNotInProjectError', 'ProjectStore']
class FileExistsInProjectError(Exception):
pass
class FileNotInProjectError(Exception):
pass
class ProjectStore(object):
"""Basic project file container."""
# INITIALIZERS #
def __init__(self):
self._files = {}
self._sourcefiles = []
self._targetfiles = []
self._transfiles = []
self.settings = {}
self.convert_map = {}
        # The map above links each input file (key) to a 2-tuple of its
        # output file and the template used. All values are project file names.
# eg. convert_map = {
# 'sources/doc.odt': ('trans/doc.odt.xlf', None),
# 'trans/doc.odt.xlf': ('targets/doc.odt', 'sources/doc.odt')
#}
# The following dict groups together sets of mappings from a file
# "type" string ("src", "tgt" or "trans") to various other values
# or objects.
self.TYPE_INFO = {
# type => prefix for new files
'f_prefix': {
'src': 'sources/',
'tgt': 'targets/',
'trans': 'trans/',
},
# type => list containing filenames for that type
'lists': {
'src': self._sourcefiles,
'tgt': self._targetfiles,
'trans': self._transfiles,
},
# type => next type in process: src => trans => tgt
'next_type': {
'src': 'trans',
'trans': 'tgt',
'tgt': None,
},
# type => name of the sub-section in the settings file/dict
'settings': {
'src': 'sources',
'tgt': 'targets',
'trans': 'transfiles',
}
}
def __del__(self):
try:
self.close()
except Exception:
pass
# ACCESSORS #
def _get_sourcefiles(self):
"""Read-only access to ``self._sourcefiles``."""
return tuple(self._sourcefiles)
sourcefiles = property(_get_sourcefiles)
def _get_targetfiles(self):
"""Read-only access to ``self._targetfiles``."""
return tuple(self._targetfiles)
targetfiles = property(_get_targetfiles)
def _get_transfiles(self):
"""Read-only access to ``self._transfiles``."""
return tuple(self._transfiles)
transfiles = property(_get_transfiles)
# SPECIAL METHODS #
    def __contains__(self, lhs):
        """:returns: ``True`` if ``lhs`` is a file name or file object in the project store."""
return lhs in self._sourcefiles or \
lhs in self._targetfiles or \
lhs in self._transfiles or \
lhs in self._files or \
lhs in self._files.values()
# METHODS #
def append_file(self, afile, fname, ftype='trans', delete_orig=False):
"""Append the given file to the project with the given filename, marked
to be of type ``ftype`` ('src', 'trans', 'tgt').
:type delete_orig: bool
:param delete_orig: Whether or not the original (given) file should
be deleted after being appended. This is set to
``True`` by
:meth:`~translate.storage.project.convert_forward`
. Not used in this class."""
        if ftype not in self.TYPE_INFO['f_prefix']:
            raise ValueError('Invalid file type: %s' % (ftype,))
if isinstance(afile, basestring) and os.path.isfile(afile) and not fname:
# Try and use afile as the file name
fname, afile = afile, open(afile)
        # Check if we can get a real file name
realfname = fname
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'name', None)
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'filename', None)
if not realfname or not os.path.isfile(realfname):
realfname = None
# Try to get the file name from the file object, if it was not given:
if not fname:
fname = getattr(afile, 'name', None)
if not fname:
fname = getattr(afile, 'filename', None)
fname = self._fix_type_filename(ftype, fname)
if not fname:
raise ValueError('Could not deduce file name and none given')
if fname in self._files:
raise FileExistsInProjectError(fname)
if realfname is not None and os.path.isfile(realfname):
self._files[fname] = realfname
else:
self._files[fname] = afile
self.TYPE_INFO['lists'][ftype].append(fname)
return afile, fname
def append_sourcefile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='src')
def append_targetfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='tgt')
def append_transfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='trans')
def remove_file(self, fname, ftype=None):
"""Remove the file with the given project name from the project.
If the file type ('src', 'trans' or 'tgt') is not given, it is
guessed."""
if fname not in self._files:
raise FileNotInProjectError(fname)
if not ftype:
# Guess file type (source/trans/target)
for ft, prefix in self.TYPE_INFO['f_prefix'].items():
if fname.startswith(prefix):
ftype = ft
break
self.TYPE_INFO['lists'][ftype].remove(fname)
if self._files[fname] and hasattr(self._files[fname], 'close'):
self._files[fname].close()
del self._files[fname]
def remove_sourcefile(self, fname):
self.remove_file(fname, ftype='src')
def remove_targetfile(self, fname):
self.remove_file(fname, ftype='tgt')
def remove_transfile(self, fname):
self.remove_file(fname, ftype='trans')
def close(self):
self.save()
def get_file(self, fname, mode='rb'):
"""Retrieve the file with the given name from the project store.
The file is looked up in the ``self._files`` dictionary. The values
in this dictionary may be ``None``, to indicate that the file is not
cacheable and needs to be retrieved in a special way. This special
way must be defined in this method of sub-classes. The value may
also be a string, which indicates that it is a real file accessible
via ``open``.
:type mode: str
:param mode: The mode in which to re-open the file (if it is closed).
"""
if fname not in self._files:
raise FileNotInProjectError(fname)
rfile = self._files[fname]
if isinstance(rfile, basestring):
rfile = open(rfile, 'rb')
# Check that the file is actually open
if getattr(rfile, 'closed', False):
rfname = fname
if not os.path.isfile(rfname):
rfname = getattr(rfile, 'name', None)
            if not rfname or not os.path.isfile(rfname):
                rfname = getattr(rfile, 'filename', None)
            if not rfname or not os.path.isfile(rfname):
raise IOError('Could not locate file: %s (%s)' % (rfile, fname))
rfile = open(rfname, mode)
self._files[fname] = rfile
return rfile
def get_filename_type(self, fname):
"""Get the type of file ('src', 'trans', 'tgt') with the given name."""
for ftype in self.TYPE_INFO['lists']:
if fname in self.TYPE_INFO['lists'][ftype]:
return ftype
raise FileNotInProjectError(fname)
def get_proj_filename(self, realfname):
"""Try and find a project file name for the given real file name."""
for fname in self._files:
if fname == realfname or self._files[fname] == realfname:
return fname
raise ValueError('Real file not in project store: %s' % (realfname))
def load(self, *args, **kwargs):
"""Load the project in some way. Undefined for this (base) class."""
pass
def save(self, filename=None, *args, **kwargs):
"""Save the project in some way. Undefined for this (base) class."""
pass
def update_file(self, pfname, infile):
"""Remove the project file with name ``pfname`` and add the contents
from ``infile`` to the project under the same file name.
:returns: the results from :meth:`ProjectStore.append_file`."""
ftype = self.get_filename_type(pfname)
self.remove_file(pfname)
self.append_file(infile, pfname, ftype)
def _fix_type_filename(self, ftype, fname):
"""Strip the path from the filename and prepend the correct prefix."""
path, fname = os.path.split(fname)
return self.TYPE_INFO['f_prefix'][ftype] + fname
def _generate_settings(self):
"""@returns A XML string that represents the current settings."""
xml = etree.Element('translationproject')
# Add file names to settings XML
if self._sourcefiles:
sources_el = etree.Element('sources')
for fname in self._sourcefiles:
src_el = etree.Element('filename')
src_el.text = fname
sources_el.append(src_el)
xml.append(sources_el)
if self._transfiles:
transfiles_el = etree.Element('transfiles')
for fname in self._transfiles:
trans_el = etree.Element('filename')
trans_el.text = fname
transfiles_el.append(trans_el)
xml.append(transfiles_el)
if self._targetfiles:
target_el = etree.Element('targets')
for fname in self._targetfiles:
tgt_el = etree.Element('filename')
tgt_el.text = fname
target_el.append(tgt_el)
xml.append(target_el)
# Add conversion mappings
if self.convert_map:
conversions_el = etree.Element('conversions')
for in_fname, (out_fname, templ_fname) in self.convert_map.iteritems():
if in_fname not in self._files or out_fname not in self._files:
continue
conv_el = etree.Element('conv')
input_el = etree.Element('input')
input_el.text = in_fname
conv_el.append(input_el)
output_el = etree.Element('output')
output_el.text = out_fname
conv_el.append(output_el)
if templ_fname:
templ_el = etree.Element('template')
templ_el.text = templ_fname
conv_el.append(templ_el)
conversions_el.append(conv_el)
xml.append(conversions_el)
# Add options to settings
if 'options' in self.settings:
options_el = etree.Element('options')
for option, value in self.settings['options'].items():
opt_el = etree.Element('option')
opt_el.attrib['name'] = option
opt_el.text = value
options_el.append(opt_el)
xml.append(options_el)
return etree.tostring(xml, pretty_print=True)
def _load_settings(self, settingsxml):
"""Load project settings from the given XML string.
``settingsxml`` is parsed into a DOM tree (``lxml.etree.fromstring``)
which is then inspected."""
settings = {}
xml = etree.fromstring(settingsxml)
# Load files in project
for section in ('sources', 'targets', 'transfiles'):
groupnode = xml.find(section)
if groupnode is None:
continue
settings[section] = []
for fnode in groupnode.getchildren():
settings[section].append(fnode.text)
conversions_el = xml.find('conversions')
if conversions_el is not None:
self.convert_map = {}
for conv_el in conversions_el.iterchildren():
in_fname, out_fname, templ_fname = None, None, None
for child_el in conv_el.iterchildren():
if child_el.tag == 'input':
in_fname = child_el.text
elif child_el.tag == 'output':
out_fname = child_el.text
elif child_el.tag == 'template':
templ_fname = child_el.text
# Make sure that in_fname and out_fname exist in
# settings['sources'], settings['targets'] or
# settings['transfiles']
in_found, out_found, templ_found = False, False, False
for section in ('sources', 'transfiles', 'targets'):
if section not in settings:
continue
if in_fname in settings[section]:
in_found = True
if out_fname in settings[section]:
out_found = True
if templ_fname and templ_fname in settings[section]:
templ_found = True
if in_found and out_found and (not templ_fname or templ_found):
self.convert_map[in_fname] = (out_fname, templ_fname)
# Load options
groupnode = xml.find('options')
if groupnode is not None:
settings['options'] = {}
for opt in groupnode.iterchildren():
settings['options'][opt.attrib['name']] = opt.text
self.settings = settings
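# Hedged usage sketch (file names are hypothetical):
#
#   store = ProjectStore()
#   store.append_sourcefile(open('doc.odt', 'rb'))  # stored as 'sources/doc.odt'
#   store.get_file('sources/doc.odt')               # re-opened if closed
#   'sources/doc.odt' in store                      # True, via __contains__
#   store.remove_file('sources/doc.odt')            # type guessed from prefix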
""" Imports Maya API methods in the 'api' namespace, and defines various utilities for Python<->API communication """
# They will be imported / redefined later in Pymel, but we temporarily need them here
import inspect
import re
import itertools
import pymel.api as api
import pymel.versions as versions
import pymel.util as _util
import startup
import plogging as _plogging
from pymel.api.plugins import mpxNamesToApiEnumNames
_logger = _plogging.getLogger(__name__)
if versions.current() < versions.v2014:
NUCLEUS_MFNDAG_BUG = True
SYMMETRY_CONSTRAINT_MFNDAG_BUG = False
elif versions.current() < versions.v2015:
NUCLEUS_MFNDAG_BUG = False
SYMMETRY_CONSTRAINT_MFNDAG_BUG = True
else:
NUCLEUS_MFNDAG_BUG = False
SYMMETRY_CONSTRAINT_MFNDAG_BUG = False
#===============================================================================
# Utility classes
#===============================================================================
class ApiEnum(tuple):
def __str__(self):
return '.'.join([str(x) for x in self])
def __repr__(self):
return '%s( %s )' % (self.__class__.__name__, super(ApiEnum, self).__repr__())
def pymelName(self):
import pymel.internal.factories as factories
parts = list(self)
pymelName = factories.apiClassNameToPymelClassName(self[0])
if pymelName is not None:
parts[0] = pymelName
return '.'.join([str(x) for x in parts])
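# Illustrative example (hypothetical values): ApiEnum is a tuple subclass used
# for dotted API enum names, e.g.
#   e = ApiEnum(('MFnMesh', 'MBoolOperation', 'kUnion'))
#   str(e)   # -> 'MFnMesh.MBoolOperation.kUnion'
#   repr(e)  # -> "ApiEnum( ('MFnMesh', 'MBoolOperation', 'kUnion') )"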
if versions.current() < versions.v2012:
# Before 2012, api had Enum, and when we unpickle the caches, it will
# need to be there... could rebuild the caches (like I had to do with
# mayaApiMelBridge) but don't really want to...
api.Enum = ApiEnum
Enum = ApiEnum
def _defaultdictdict(cls, val=None):
if val is None:
return _util.defaultdict(dict)
else:
return _util.defaultdict(dict, val)
#===============================================================================
# ghost objects
#===============================================================================
class GhostObjsOkHere(object):
_OK = False
@classmethod
def OK(cls):
return cls._OK
def __enter__(self):
self.oldOK = self.OK()
type(self)._OK = True
return self
def __exit__(self, exc_type, exc_value, traceback):
type(self)._OK = self.oldOK
def _makeDgModGhostObject(mayaType, dagMod, dgMod):
if versions.current() >= versions.v2012:
# only time post-2012 when we should have to call this func is when
# rebuilding caches - ie, running from inside ApiCache
if not GhostObjsOkHere.OK():
_logger.raiseLog(_logger.WARNING, '_makeDgModGhostObject should be '
'unnecessary in maya versions '
'past 2012 (except when '
'rebuilding cache) - was making '
'a {!r} object'.format(mayaType))
_logger.debug("Creating ghost node: %s" % mayaType)
# we create a dummy object of this type in a dgModifier (or dagModifier)
# as the dgModifier.doIt() method is never called, the object
# is never actually created in the scene
# Note: at one point, if we didn't call the dgMod/dagMod.deleteNode method,
# and we call this function while loading a scene (for instance, if the scene requires
# a plugin that isn't loaded, and defines custom node types), then the nodes were still
# somehow created, despite never explicitly calling doIt()...
# ... however, this seems to no longer be the case, and the deleteNode calls are apparently
# harmful
    if type(dagMod) is not api.MDagModifier or type(dgMod) is not api.MDGModifier:
        raise ValueError("Need a valid MDagModifier and MDGModifier or cannot return a valid MObject")
# Regardless of whether we're making a DG or DAG node, make a parent first -
# for some reason, this ensures good cleanup (don't ask me why...??)
parent = dagMod.createNode('transform', api.MObject())
try:
# DependNode
obj = dgMod.createNode(mayaType)
except RuntimeError:
# DagNode
try:
obj = dagMod.createNode(mayaType, parent)
except Exception, err:
_logger.debug("Error trying to create ghost node for '%s': %s" % (mayaType, err))
return None
if api.isValidMObject(obj):
return obj
else:
_logger.debug("Error trying to create ghost node for '%s'" % mayaType)
return None
class InvalidNodeTypeError(Exception):
pass
class ManipNodeTypeError(InvalidNodeTypeError):
pass
class _GhostObjMaker(object):
'''Context used to get an mobject which we can query within this context.
Automatically does any steps need to create and destroy the mobj within
the context
(Note - None may be returned in the place of any mobj)
'''
def __init__(self, mayaTypes, dagMod=None, dgMod=None, manipError=True,
multi=False):
self.multi = multi
if not multi:
mayaTypes = [mayaTypes]
self.mayaTypes = mayaTypes
if dagMod is None:
dagMod = api.MDagModifier()
if dgMod is None:
dgMod = api.MDGModifier()
self.dagMod = dagMod
self.dgMod = dgMod
self.dagGhosts = False
self.dgGhosts = False
#self.theMod = None
self.manipError = manipError
self.byMayaType = {}
self.ghosts = set()
def __enter__(self):
import maya.cmds as cmds
for mayaType in self.mayaTypes:
# check if an obj of the given type already exists in the scene, and
# if so, use it
madeGhost = False
allObj = cmds.ls(exactType=mayaType)
if allObj:
obj = api.toMObject(allObj[0])
else:
if mayaType in ApiCache.CRASH_TYPES:
if self.manipError and "Manip" in mayaType:
raise ManipNodeTypeError
obj = None
else:
obj = _makeDgModGhostObject(mayaType, self.dagMod, self.dgMod)
if obj is not None:
self.ghosts.add(mayaType)
madeGhost = True
if obj is not None:
if (self.manipError
and (obj.hasFn(api.MFn.kManipulator)
or obj.hasFn(api.MFn.kManipContainer)
or obj.hasFn(api.MFn.kPluginManipContainer)
or obj.hasFn(api.MFn.kPluginManipulatorNode)
or obj.hasFn(api.MFn.kManipulator2D)
or obj.hasFn(api.MFn.kManipulator3D)
or obj.hasFn(api.MFn.kManip2DContainer)
)
):
raise ManipNodeTypeError
if madeGhost and not (self.dagGhosts and self.dgGhosts):
if obj.hasFn(api.MFn.kDagNode):
self.dagGhosts = True
else:
self.dgGhosts = True
self.byMayaType[mayaType] = obj
# Note that we always create a "real" instance of the object by
# calling doIt()... we used to not call doIt(), in which case
# the mobject would actually still be queryable, but not in the
# scene - thus the "ghost" obj - but this would create problems in
# some cases - ie, if this was triggered during reference loading,
# the objects would actually be entered into the scene... and
# because we didn't call undoIt, they wouldn't get cleaned up
if self.dagGhosts:
self.dagMod.doIt()
if self.dgGhosts:
self.dgMod.doIt()
if self.multi:
return self.byMayaType
else:
return obj
def __exit__(self, exc_type, exc_value, traceback):
try:
if self.dagGhosts:
self.dagMod.undoIt()
if self.dgGhosts:
self.dgMod.undoIt()
except RuntimeError:
stillExist = []
for mayaType in self.ghosts:
obj = self.byMayaType[mayaType]
if obj is not None and api.isValidMObjectHandle(api.MObjectHandle(obj)):
stillExist.append(obj)
if stillExist:
mfnDag = api.MFnDagNode()
mfnDep = api.MFnDependencyNode()
names = []
for obj in stillExist:
if obj.hasFn(api.MFn.kDagNode):
# we need to delete the parent, since it will have
# created a parent transform too
mfnDag.setObject(obj)
mfnDag.setObject(mfnDag.parent(0))
names.append(mfnDag.partialPathName())
else:
mfnDep.setObject(obj)
names.append(mfnDep.name())
print names
#import maya.cmds as cmds
# cmds.delete(names)
mfnDag = api.MFnDagNode()
dagMod = api.MDagModifier()
dgMod = api.MDGModifier()
                delDag = False
                delDg = False
                for obj in stillExist:
                    if obj.hasFn(api.MFn.kDagNode):
                        # we need to delete the parent, since it will have
                        # created a parent transform too
                        mfnDag.setObject(obj)
                        dagMod.deleteNode(mfnDag.parent(0))
                        delDag = True
                    else:
                        dgMod.deleteNode(obj)
                        delDg = True
                if delDag:
                    dagMod.doIt()
                if delDg:
                    dgMod.doIt()
#===============================================================================
# Utilities for query maya node info
#===============================================================================
_ABSTRACT_SUFFIX = ' (abstract)'
_ASSET_PREFIX = 'adskAssetInstanceNode_'
if hasattr(api, 'MNodeClass'):
# if we have MNodeClass, this is easy...
def isPluginNode(nodeName):
try:
api.MNodeClass(nodeName).pluginName()
return True
except RuntimeError:
return False
else:
# otherwise, we have to query all plugins...
def isPluginNode(nodeName):
import maya.cmds as cmds
for plugin in cmds.pluginInfo(q=1, listPlugins=True):
plugNodes = cmds.pluginInfo(plugin, q=1, dependNode=True)
# plugNodes may be None...
if plugNodes and nodeName in plugNodes:
return True
return False
# You'd think getting a comprehensive list of node types would be easy, but
# due to strange behavior of various edge cases, it can be tricky...
def _getMayaTypes(real=True, abstract=True, basePluginTypes=True, addAncestors=True,
noManips=True, noPlugins=False, returnRealAbstract=False):
'''Returns a list of maya types
Parameters
----------
real : bool
Include the set of real/createable nodes
abstract : bool
        Include the set of abstract nodes (as defined by allNodeTypes(includeAbstract=True))
basePluginTypes : bool
Include the set of "base" plugin maya types (these are not returned by
allNodeTypes(includeAbstract=True), and so, even though these types are
abstract, this set shares no members with those added by the abstract
flag
addAncestors : bool
If true, add to the list of nodes returned all of their ancestors as
well
noManips : bool | 'fast'
If true, filter out any manipulator node types; if the special value
'fast', then it will filter out manipulator node types, but will do so
using a faster method that may potentially be less thorough
noPlugins : bool
If true, filter out any nodes defined in plugins (note - if
basePluginTypes is True, and noPlugins is False, the basePluginTypes
will still be returned, as these types are not themselves defined in
the plugin)
returnRealAbstract : bool
if True, will return two sets, realNodes and abstractNodes; otherwise,
returns a single set of all the desired nodes (more precisely, realNodes
        is defined as the set of directly createable nodes matching the
criteria, and abstract are all non-createable nodes matching the
criteria)
'''
import maya.cmds as cmds
# keep track of which nodes were abstract - this can be useful later,
# especially pre-2012
abstractNodes = set()
realNodes = set()
if abstract or addAncestors:
# if we want abstract, need to do extra processing to strip the
# trailing ' (abstract)'
raw = cmds.allNodeTypes(includeAbstract=True)
for node in raw:
if node.endswith(_ABSTRACT_SUFFIX):
node = node[:-len(_ABSTRACT_SUFFIX)]
# For some reason, maya returns these names with cmds.allNodeTypes(includeAbstract=True):
# adskAssetInstanceNode_TlightShape
# adskAssetInstanceNode_TdnTx2D
# adskAssetInstanceNode_TdependNode
# ...but they show up in parent hierarchies with a 'T' in front, ie:
# cmds.nodeType(adskMaterial, isTypeName=True, inherited=True)
# == [u'TadskAssetInstanceNode_TdependNode', u'adskMaterial']
# the 'T' form is also what is needed to use it as an arg to nodeType...
# ...so, stick the 'T' in front...
if node.startswith(_ASSET_PREFIX):
node = 'T' + node
abstractNodes.add(node)
else:
if not real:
continue
realNodes.add(node)
elif real:
realNodes.update(cmds.allNodeTypes())
if basePluginTypes:
import pymel.api.plugins
abstractNodes.update(pymel.api.plugins.pluginMayaTypes)
    # If we're doing addAncestors anyway, might as well get manips with the
# more thorough method, using the inheritance chain, since we're doing that
# anyway...
if noManips == 'fast' and not addAncestors:
manips = set(cmds.nodeType('manip3D', isTypeName=1, derived=1))
realNodes.difference_update(manips)
abstractNodes.difference_update(manips)
noManips = False
if addAncestors or noManips:
# There are a few nodes which will not be returned even by
# allNodeTypes(includeAbstract=True), but WILL show up in the
# inheritance hierarchies...
# iterate over first real nodes, then abstract nodes... this lets us
# take advantage of inheritance caching - especially pre-2012, where
# inheritance chain of abstract nodes is not directly queryable -
# since getInheritance will cache the inheritance chain of the given
# node, AND all its parents
# make a copy of what we iterate over, as we will be modifying
# realNodes and abstractNodes as we go...
for mayaType in list(itertools.chain(realNodes, abstractNodes)):
try:
ancestors = getInheritance(mayaType, checkManip3D=noManips)
except ManipNodeTypeError:
realNodes.discard(mayaType)
abstractNodes.discard(mayaType)
except RuntimeError:
# was an error querying - happens with some node types, like
# adskAssetInstanceNode_TdnTx2D
continue
else:
if addAncestors and ancestors:
abstractNodes.update(set(ancestors) - realNodes)
if noPlugins:
for nodeSet in (realNodes, abstractNodes):
# need to modify in place, so make copy of nodeSet...
for node in list(nodeSet):
if isPluginNode(node):
nodeSet.remove(node)
# we may have put nodes in realNodes or abstractNodes for info purposes...
# make sure they are cleared before returning results, if needed...
if not real:
realNodes = set()
if not abstract:
abstractNodes = set()
if returnRealAbstract:
return realNodes, abstractNodes
else:
return realNodes | abstractNodes
def _getAbstractMayaTypes(**kwargs):
kwargs.setdefault('real', False)
kwargs['abstract'] = True
return _getMayaTypes(**kwargs)
def _getRealMayaTypes(**kwargs):
kwargs['real'] = True
kwargs.setdefault('abstract', False)
kwargs.setdefault('basePluginTypes', False)
kwargs.setdefault('addAncestors', False)
return _getMayaTypes(**kwargs)
def _getAllMayaTypes(**kwargs):
kwargs['real'] = True
kwargs['abstract'] = True
return _getMayaTypes(**kwargs)
_fixedLineages = {}
_cachedInheritances = {}
def getInheritance(mayaType, checkManip3D=True, checkCache=True,
updateCache=True):
"""Get parents as a list, starting from the node after dependNode, and
ending with the mayaType itself.
Raises a ManipNodeTypeError if the node type fed in was a manipulator
"""
# To get the inheritance post maya2012, we use nodeType(isTypeName=True),
# which means we don't need a real node. However, in maya < 2012, nodeType
    # requires a real node. To get these without polluting the scene we use the
# _GhostObjMaker, which on enter, uses a dag/dg modifier, and calls the doIt
# method; we then get the lineage, and on exit, it calls undoIt.
global _cachedInheritances
if checkCache and mayaType in _cachedInheritances:
return _cachedInheritances[mayaType]
import maya.cmds as cmds
lineage = None
if versions.current() >= versions.v2012:
# We now have nodeType(isTypeName)! yay!
try:
lineage = cmds.nodeType(mayaType, isTypeName=True, inherited=True)
except RuntimeError:
pass
else:
with _GhostObjMaker(mayaType) as obj:
if obj is not None:
if obj.hasFn(api.MFn.kDagNode):
name = api.MFnDagNode(obj).partialPathName()
else:
name = api.MFnDependencyNode(obj).name()
if not obj.isNull() and not obj.hasFn(api.MFn.kManipulator3D) and not obj.hasFn(api.MFn.kManipulator2D):
lineage = cmds.nodeType(name, inherited=1)
if lineage is None:
global _fixedLineages
if not _fixedLineages:
if versions.current() >= versions.v2012:
controlPoint = cmds.nodeType('controlPoint', isTypeName=True,
inherited=True)
else:
controlPoint = [u'containerBase',
u'entity',
u'dagNode',
u'shape',
u'geometryShape',
u'deformableShape',
u'controlPoint']
# maya2013 introduced shadingDependNode...
if versions.current() >= versions.v2013:
texture2d = ['shadingDependNode', 'texture2d']
else:
texture2d = ['texture2d']
# For whatever reason, nodeType(isTypeName) returns
# None for the following mayaTypes:
_fixedLineages = {
'node': [],
'file': texture2d + [u'file'],
'lattice': controlPoint + [u'lattice'],
'mesh': controlPoint + [u'surfaceShape', u'mesh'],
'nurbsCurve': controlPoint + [u'curveShape', u'nurbsCurve'],
'nurbsSurface': controlPoint + [u'surfaceShape', u'nurbsSurface'],
'time': [u'time']
}
if mayaType in _fixedLineages:
lineage = _fixedLineages[mayaType]
else:
raise RuntimeError("Could not query the inheritance of node type %s" % mayaType)
elif checkManip3D and 'manip3D' in lineage:
raise ManipNodeTypeError
try:
assert (mayaType == 'node' and lineage == []) or lineage[-1] == mayaType
except Exception:
print mayaType, lineage
raise
if len(set(lineage)) != len(lineage):
# cyclical lineage: first discovered with xgen nodes.
# might be a result of multiple inheritance being returned strangely by nodeType.
#
# an example lineage is:
# [u'containerBase', u'entity', u'dagNode', u'shape', u'geometryShape', u'locator', u'THlocatorShape', u'SphereLocator',
# u'containerBase', u'entity', u'dagNode', u'shape', u'geometryShape', u'locator', u'THlocatorShape', u'aiSkyDomeLight']
# note the repeat - we will try to fix lineages like this, resolving to:
# [u'containerBase', u'entity', u'dagNode', u'shape', u'geometryShape', u'locator', u'THlocatorShape', u'SphereLocator', u'aiSkyDomeLight']
# first pop the rightmost element, which is the mayaType...
if lineage.pop() != mayaType:
raise RuntimeError("lineage for %s did not end with it's own node type" % mayaType)
# then try to find the first element somewhere else - this should indicate the start of the repeated chain...
try:
nextIndex = lineage.index(lineage[0], 1)
except ValueError:
# unknown case, don't know how to fix...
pass
else:
firstLineage = lineage[:nextIndex]
secondLineage = lineage[nextIndex:]
if len(firstLineage) < len(secondLineage):
shorter = firstLineage
longer = secondLineage
else:
shorter = secondLineage
longer = firstLineage
if longer[:len(shorter)] == shorter:
# yay! we know how to fix!
lineage = longer
lineage.append(mayaType)
if updateCache and lineage:
if len(set(lineage)) != len(lineage):
# cyclical lineage: first discovered with xgen nodes.
# might be a result of multiple inheritance being returned strangely by nodeType.
print mayaType, lineage
_logger.raiseLog(_logger.WARNING, "lineage for node %s is cyclical: %s" % (mayaType, lineage))
_cachedInheritances[mayaType] = lineage
# don't cache any of the parents
return lineage
# add not just this lineage, but all parent's lineages as well...
for i in xrange(len(lineage), 0, -1):
thisLineage = lineage[:i]
thisNode = thisLineage[-1]
oldVal = _cachedInheritances.get(thisNode)
if oldVal is None:
_cachedInheritances[thisNode] = thisLineage
elif oldVal != thisLineage:
_logger.raiseLog(_logger.WARNING, "lineage for node %s changed:\n from %s\n to %s)" % (thisNode, oldVal, thisLineage))
_cachedInheritances[thisNode] = thisLineage
return lineage
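# Hedged example (requires a Maya session; the lineage shown is taken from the
# _fixedLineages fallback above, so the shape is representative):
#   getInheritance('nurbsCurve')
#   # -> [u'containerBase', u'entity', u'dagNode', u'shape', u'geometryShape',
#   #     u'deformableShape', u'controlPoint', u'curveShape', u'nurbsCurve']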
#===============================================================================
# Name utilities
#===============================================================================
def nodeToApiName(nodeName):
return 'k' + _util.capitalize(nodeName)
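# Illustrative example: nodeToApiName('transform') -> 'kTransform'
# (assumes _util.capitalize upper-cases only the first letter)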
def getLowerCaseMapping(names):
uniqueLowerNames = {}
multiLowerNames = {}
for name in names:
lowerType = name.lower()
if lowerType in multiLowerNames:
multiLowerNames[lowerType].append(name)
elif lowerType in uniqueLowerNames:
multiLowerNames[lowerType] = [uniqueLowerNames.pop(lowerType), name]
else:
uniqueLowerNames[lowerType] = name
return uniqueLowerNames, multiLowerNames
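# Illustrative example (hypothetical names): types that collide once
# lower-cased land in the "multi" dict, everything else in the "unique" one:
#   getLowerCaseMapping(['mesh', 'Mesh', 'lattice'])
#   # -> ({'lattice': 'lattice'}, {'mesh': ['mesh', 'Mesh']})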
# (find, replace) regex pairs for generating alternative api-name guesses.
# Note: several patterns repeat with different replacements, so this must be
# a list of tuples - a dict literal would silently collapse duplicate keys.
API_NAME_MODIFIERS = [
    ('base', ''),
    ('abstract', ''),
    ('node', ''),
    ('shape', ''),
    ('mod(?!(ify|ifier))', 'modify'),
    ('mod(?!(ify|ifier))', 'modifier'),
    ('modifier', 'mod'),
    ('modify', 'mod'),
    ('poly(?!gon)', 'polygon'),
    ('polygon', 'poly'),
    ('vert(?!(ex|ice))', 'vertex'),
    ('vert(?!(ex|ice))', 'vertice'),
    ('vertice', 'vert'),
    ('vertex', 'vert'),
    ('subd(?!iv)', 'subdiv'),
    ('subd(?!iv)', 'subdivision'),
    ('subdiv(?!ision)', 'subd'),
    ('subdiv(?!ision)', 'subdivision'),
    ('subdivision', 'subd'),
    ('subdivision', 'subdiv'),
    ('^th(custom)?', 'plugin'),
]
API_NAME_MODIFIERS = [(re.compile(find), replace)
                      for find, replace in API_NAME_MODIFIERS]
apiSuffixes = ['', 'node', 'shape', 'shapenode']
#===============================================================================
# Cache classes
#===============================================================================
class ApiMelBridgeCache(startup.SubItemCache):
NAME = 'mayaApiMelBridge'
DESC = 'the API-MEL bridge'
COMPRESSED = True
USE_VERSION = False
_CACHE_NAMES = '''apiToMelData apiClassOverrides'''.split()
CACHE_TYPES = {'apiToMelData': _defaultdictdict}
STORAGE_TYPES = {'apiToMelData': dict}
class ApiCache(startup.SubItemCache):
NAME = 'mayaApi'
DESC = 'the API cache'
COMPRESSED = True
USE_VERSION = True
_CACHE_NAMES = '''apiTypesToApiEnums apiEnumsToApiTypes mayaTypesToApiTypes
apiTypesToApiClasses apiClassInfo'''.split()
EXTRA_GLOBAL_NAMES = tuple(['mayaTypesToApiEnums'])
# Descriptions of various elements:
# Maya static info :
# Initializes various static look-ups to speed up Maya types conversions
# self.apiClassInfo
# self.apiTypesToApiEnums
# self.apiEnumsToApiTypes
# self.apiTypesToApiClasses
# Lookup of currently existing Maya types as keys with their corresponding API type as values.
# Not a read only (static) dict as these can change (if you load a plugin)
# self.mayaTypesToApiTypes
# lookup tables for a direct conversion between Maya type to their MFn::Types enum
# self.mayaTypesToApiEnums
# creating these will crash Maya!
CRASH_TYPES = {
'xformManip': 'kXformManip',
'moveVertexManip': 'kMoveVertexManip',
}
# For some reason, a bunch of nodes crashed Maya 2016 Ext1, but they
# apparently worked with 2016.5 / 2016 Ext2 (since it didn't crash when I
    # built its cache - though it was a pre-release, so perhaps it didn't have
# all plugins?)
if versions.v2016_EXT1 <= versions.current() < versions.v2016_EXT2:
        CRASH_TYPES.update({
            'type': 'kPluginDependNode',
            'vectorExtrude': 'kPluginDependNode',
            'shellDeformer': 'kPluginDependNode',
            'displayPoints': 'kPluginLocatorNode',
            'svgToPoly': 'kPluginDependNode',
            'objectGrpToComp': 'kPluginDependNode',
            'vectorAdjust': 'kPluginDeformerNode',
        })
# hold any overrides for mayaTypesToApiTypes...
# ie, for cases where the name guess is wrong, or for weird plugin types
# that don't inherit from an mpx type (ie, vectorRenderGlobals), etc
MAYA_TO_API_OVERRIDES = {
# this what is returned by
# allNodeTypes(includeAbstract=True)
'node': 'kDependencyNode',
# this is the name pymel uses
'dependNode': 'kDependencyNode',
# a strange one - a plugin node that has an
# apitype... is in studioImport.so... also has a
# doc entry...
'smear': 'kSmear',
# plugin node that's not in all distributions
# (ie, it's missing in Linux), so just include it
# here
'vectorRenderGlobals': 'kDependencyNode',
}
# TODO: if nucleus/symmetryConstraint bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of Nucleus's parent to DependNode
# - remove 2 checks in allapi.toApiObject for objects which can have an
# MDagPath but can't use MFnDagNode
API_TO_MFN_OVERRIDES = {
'kHikHandle': api.MFnTransform, # hikHandle inherits from ikHandle, but is not compatible with MFnIkHandle
'kFfdDualBase': api.MFnDependencyNode, # jointFfd inherits from ffd, but is not compatible with MFnLatticeDeformer
'kTransferAttributes': api.MFnDependencyNode, # transferAttributes inherits from weightGeometryFilter, but is not compatible with MFnWeightGeometryFilter or MFnGeometryFilter
}
if NUCLEUS_MFNDAG_BUG:
# fun one - even though it can be parented and inherits from transform,
# it's incompatible with MFnTransform or even MFnDagNode
API_TO_MFN_OVERRIDES['kNucleus'] = api.MFnDependencyNode
if SYMMETRY_CONSTRAINT_MFNDAG_BUG:
API_TO_MFN_OVERRIDES['kSymmetryConstraint'] = api.MFnDependencyNode
DEFAULT_API_TYPE = 'kDependencyNode'
def __init__(self, docLocation=None):
super(ApiCache, self).__init__()
for name in self.EXTRA_GLOBAL_NAMES:
setattr(self, name, {})
self.docLocation = docLocation
def _buildMayaToApiInfo(self, reservedOnly=True):
self._buildMayaNodeInfo()
        # Fixes for types that don't have an MFn, by doing a node creation and testing it
unknownTypes = set()
toCreate = []
self.mayaTypesToApiTypes = self._buildMayaReservedTypes()
# do real nodes first - on pre-2012, can't directly query inheritance of
# abstract nodes, so relying on caching of parent hierarchies when
# querying a real hierarchy is the only way to get inheritance info
# for abstract types
if not reservedOnly:
for mayaType in itertools.chain(self.realMayaTypes,
self.abstractMayaTypes):
if mayaType not in self.mayaTypesToApiTypes:
toCreate.append(mayaType)
if toCreate:
# Put in a debug, because ghost nodes can be problematic...
_logger.debug("Starting to create ghost nodes...")
with GhostObjsOkHere():
with _GhostObjMaker(toCreate, manipError=False, multi=True) as typeToObj:
for mayaType in toCreate:
obj = typeToObj[mayaType]
if obj:
apiType = obj.apiTypeStr()
self.mayaTypesToApiTypes[mayaType] = apiType
else:
unknownTypes.add(mayaType)
# Put in a debug, because ghost nodes can be problematic...
_logger.debug("...finished creating ghost nodes")
if len(unknownTypes) > 0:
_logger.warn("Unable to get maya-to-api type info for the following nodes: %s" % ", ".join(unknownTypes))
for mayaType in unknownTypes:
# For unknown types, use the parent type
try:
inheritance = getInheritance(mayaType)
except (ManipNodeTypeError, RuntimeError):
continue
apiType = None
            # if we have a node A, and we get back its inheritance as:
# [E, D, C, B, A]
# ...and 'D' is the first parent that we can find info for, we
# may as well set the types for 'B' and 'C' parents as well...
# also, this means that we may already have set THIS mayaType
# (if it was the parent of another unknown node we already set),
# so we loop through all nodes in inheritance, including this
# type
toSet = [mayaType]
if inheritance:
for parent in reversed(inheritance):
apiType = self.mayaTypesToApiTypes.get(parent)
if apiType:
break
else:
toSet.append(parent)
if not apiType:
apiType = self.DEFAULT_API_TYPE
for node in toSet:
self.mayaTypesToApiTypes[node] = apiType
for mayaType, apiType in self.mayaTypesToApiTypes.iteritems():
self.addMayaType(mayaType, apiType)
def _buildApiTypesList(self):
"""the list of api types is static. even when a plugin registers a new maya type, it will be associated with
an existing api type"""
self.apiTypesToApiEnums = dict(inspect.getmembers(api.MFn, lambda x: type(x) is int))
self.apiEnumsToApiTypes = dict((self.apiTypesToApiEnums[k], k) for k in self.apiTypesToApiEnums.keys())
def _buildMayaReservedTypes(self, force=False):
"""
Build a list of Maya reserved types.
These cannot be created directly from the API, thus the dgMod trick to
find the corresponding Maya type won't work
"""
reservedMayaTypes = {}
# start with plugin types
import pymel.api.plugins as plugins
for mpxName, mayaNode in plugins.mpxNamesToMayaNodes.iteritems():
reservedMayaTypes[mayaNode] = plugins.mpxNamesToApiEnumNames[mpxName]
for mayaType in self.abstractMayaTypes:
if mayaType in reservedMayaTypes:
continue
apiGuess = self._guessApiTypeByName(mayaType)
if apiGuess:
reservedMayaTypes[mayaType] = apiGuess
reservedMayaTypes.update(self.MAYA_TO_API_OVERRIDES)
reservedMayaTypes.update(self.CRASH_TYPES)
# filter to make sure all these types exist in current version (some are Maya2008 only)
reservedMayaTypes = dict((item[0], item[1])
for item in reservedMayaTypes.iteritems()
if item[1] in self.apiTypesToApiEnums)
return reservedMayaTypes
    # TODO: eventually, would like to move the node-hierarchy-building stuff
# from cmdcache into here... we could then cache the node inheritance info,
# instead of constantly re-querying it in various places...
def _buildMayaNodeInfo(self):
        '''Stores temporary information about maya nodes + names
'''
if getattr(self, '_builtMayaNodeInfo', False):
return
if not self.apiTypesToApiEnums:
self._buildApiTypesList()
self.realMayaTypes, self.abstractMayaTypes = _getAllMayaTypes(returnRealAbstract=True)
self.allMayaTypes = self.realMayaTypes | self.abstractMayaTypes
self.uniqueLowerMaya, self.multiLowerMaya = getLowerCaseMapping(self.allMayaTypes)
self.allLowerMaya = set(self.uniqueLowerMaya) | set(self.multiLowerMaya)
self.uniqueLowerApi, self.multiLowerApi = getLowerCaseMapping(self.apiTypesToApiEnums)
self._builtMayaNodeInfo = True
return
# _buildMayaNodeInfo must already have been called...
def _guessApiTypeByName(self, nodeName):
# first, try the easy case...
apiName = nodeToApiName(nodeName)
if apiName in self.apiTypesToApiEnums:
return apiName
lowerNode = nodeName.lower()
if lowerNode not in self.uniqueLowerMaya:
return None
# now, try with various modifications...
possibleApiNames = set()
possibleModifications = [(find, replace)
for find, replace in API_NAME_MODIFIERS
if find.search(lowerNode)]
# find all possible combinations of all possible modifications
for modifyNum in xrange(len(possibleModifications) + 1):
for modifyCombo in itertools.combinations(possibleModifications, modifyNum):
baseName = lowerNode
for find, replace in modifyCombo:
baseName = find.sub(replace, baseName)
if not baseName:
# if we've eliminated the name with our changes - ie,
# 'shape' would go to '' - then skip
continue
if baseName != lowerNode and baseName in self.allLowerMaya:
# if after modification, our new name is the name of another
# maya node, skip
continue
apiLower = 'k' + baseName
if apiLower in self.uniqueLowerApi:
possibleApiNames.add(self.uniqueLowerApi[apiLower])
else:
for suffix in apiSuffixes:
apiWithSuffix = apiLower + suffix
if apiWithSuffix in self.uniqueLowerApi:
possibleApiNames.add(self.uniqueLowerApi[apiWithSuffix])
if len(possibleApiNames) == 1:
return list(possibleApiNames)[0]
return None
# Note - it's possible there are multiple substrings of the same length
# that are all "tied" for longest - this method will only return the first
# it finds
@staticmethod
def _longestCommonSubstring(str1, str2):
        if str1 == str2:
            # equal strings: the whole string is the common substring
            # (return the string itself, not a list, so the return type
            # is consistent with the other branches)
            return str1
if len(str1) > len(str2):
longer = str1
shorter = str2
else:
longer = str2
shorter = str1
maxSize = len(shorter)
for strSize in xrange(maxSize, 0, -1):
for startPos in xrange(0, maxSize - strSize + 1):
subStr = shorter[startPos:startPos + strSize]
if subStr in longer:
return subStr
return ''
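    # Illustrative example (a plain string is returned in every case):
    #   ApiCache._longestCommonSubstring('banana', 'bandana')  # -> 'ban'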
@staticmethod
def _bestMatches(theStr, otherStrings, minLength=2, caseSensitive=False):
if not caseSensitive:
theStr = theStr.lower()
byLength = {}
for otherString in otherStrings:
if caseSensitive:
compOther = otherString
else:
compOther = otherString.lower()
            size = len(ApiCache._longestCommonSubstring(theStr, compOther))
byLength.setdefault(size, []).append(otherString)
longest = max(byLength)
if longest >= minLength:
return byLength[longest]
else:
return []
def _buildApiClassInfo(self):
_logger.debug("Starting ApiCache._buildApiClassInfo...")
from pymel.internal.parsers import ApiDocParser
self.apiClassInfo = {}
parser = ApiDocParser(api, enumClass=ApiEnum, docLocation=self.docLocation)
for name, obj in inspect.getmembers(api, lambda x: type(x) == type and x.__name__.startswith('M')):
if not name.startswith('MPx'):
try:
info = parser.parse(name)
self.apiClassInfo[name] = info
except (IOError, OSError, ValueError, IndexError), e:
import errno
baseMsg = "failed to parse docs for %r:" % name
if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT:
# If we couldn't parse because we couldn't find the
# file, only raise a warning... there are many classes
# (ie, MClothTriangle) that don't have a doc page...
_logger.warning(baseMsg)
_logger.warning("%s: %s" % (name, e))
else:
import traceback
_logger.error(baseMsg)
_logger.error(traceback.format_exc())
_logger.debug("...finished ApiCache._buildApiClassInfo")
def _buildApiTypeToApiClasses(self):
self.apiTypesToApiClasses = {}
def _MFnType(x):
if x == api.MFnBase:
return self.apiEnumsToApiTypes[1] # 'kBase'
else:
try:
return self.apiEnumsToApiTypes[x().type()]
                except Exception:
return self.apiEnumsToApiTypes[0] # 'kInvalid'
# all of maya OpenMaya api is now imported in module api's namespace
mfnClasses = inspect.getmembers(api, lambda x: inspect.isclass(x) and issubclass(x, api.MFnBase))
for name, mfnClass in mfnClasses:
current = _MFnType(mfnClass)
if not current:
_logger.warning("MFnClass gave MFnType %s" % current)
elif current == 'kInvalid':
_logger.warning("MFnClass gave MFnType %s" % current)
else:
self.apiTypesToApiClasses[current] = mfnClass
# we got our map by going from Mfn to enum; however, multiple enums can
# map to the same MFn, so need to fill in the gaps of missing enums for
# enums to MFn...
# we do this by querying the maya hierarchy, and marching up it until
# we find an entry that IS in apiTypesToApiClasses
for mayaType, apiType in self.mayaTypesToApiTypes.iteritems():
if apiType not in self.apiTypesToApiClasses:
self._getOrSetApiClass(apiType, mayaType)
def _getOrSetApiClass(self, apiType, mayaType):
if apiType not in self.apiTypesToApiClasses:
if apiType in self.API_TO_MFN_OVERRIDES:
mfnClass = self.API_TO_MFN_OVERRIDES[apiType]
else:
mfnClass = self._getApiClassFromMayaInheritance(apiType, mayaType)
self.apiTypesToApiClasses[apiType] = mfnClass
return self.apiTypesToApiClasses[apiType]
def _getApiClassFromMayaInheritance(self, apiType, mayaType):
mfnClass = None
try:
inheritance = getInheritance(mayaType)
except Exception:
pass
else:
# inheritance always ends with that node type... so skip that...
for mayaParentType in reversed(inheritance[:-1]):
parentApiType = self.mayaTypesToApiTypes.get(mayaParentType)
if parentApiType:
parentMfn = self.apiTypesToApiClasses.get(parentApiType)
if parentMfn:
mfnClass = parentMfn
break
if not mfnClass:
mfnClass = api.MFnDependencyNode
return mfnClass
def _buildApiRelationships(self):
"""
Used to rebuild api info from scratch.
WARNING: will load all maya-installed plugins, without making an
attempt to return the loaded plugins to the state they were at before
this command is run. Also, the act of loading all the plugins may
crash maya, especially if done from a non-GUI session
"""
# Put in a debug, because this can be crashy
_logger.debug("Starting ApiCache._buildApiTypeHierarchy...")
if not startup.mayaStartupHasRun():
startup.mayaInit()
import maya.cmds
import pymel.api.plugins as plugins
# load all maya plugins
# There's some weirdness with plugin loading on windows XP x64... if
# you have a fresh user profile, and do:
# import maya.standalone
# maya.standalone.initialize()
# import maya.mel as mel
# mel.eval('''source "initialPlugins.mel"''')
# ..then things work. But if you import maya.OpenMaya:
# import maya.standalone
# maya.standalone.initialize()
# import maya.OpenMaya
# import maya.mel as mel
# mel.eval('''source "initialPlugins.mel"''')
# ...it crashes when loading Mayatomr. Also, oddly, if you load
# Mayatomr directly, instead of using initialPlugins.mel, it also
# crashes:
# import maya.standalone
# maya.standalone.initialize()
# import maya.cmds
# maya.cmds.loadPlugin('C:\\3D\\Autodesk\\Maya2012\\bin\\plug-ins\\Mayatomr.mll')
# Anyway, for now, adding in the line to do sourcing of initialPlugins.mel
# until I can figure out if it's possible to avoid this crash...
import maya.mel
maya.mel.eval('source "initialPlugins.mel"')
plugins.loadAllMayaPlugins()
self._buildApiClassInfo()
self._buildMayaToApiInfo()
self._buildApiTypeToApiClasses()
_logger.debug("...finished ApiCache._buildApiTypeHierarchy")
def addMayaType(self, mayaType, apiType=None, updateObj=None):
""" Add a type to the MayaTypes lists. Fill as many dictionary caches as we have info for.
- mayaTypesToApiTypes
- mayaTypesToApiEnums
if updateObj is given, this instance will first be updated from it,
before the mayaType is added.
"""
        if apiType and apiType != 'kInvalid':
apiEnum = getattr(api.MFn, apiType)
self.mayaTypesToApiTypes[mayaType] = apiType
self.mayaTypesToApiEnums[mayaType] = apiEnum
def removeMayaType(self, mayaType, updateObj=None):
""" Remove a type from the MayaTypes lists.
- mayaTypesToApiTypes
- mayaTypesToApiEnums
if updateObj is given, this instance will first be updated from it,
before the mayaType is added.
"""
self.mayaTypesToApiEnums.pop(mayaType, None)
self.mayaTypesToApiTypes.pop(mayaType, None)
def read(self, raw=False):
data = super(ApiCache, self).read()
if not raw:
# Before 2012, we cached reservedMayaTypes and reservedApiTypes,
# even though they weren't used...
if data is not None and len(data) != len(self._CACHE_NAMES):
if len(data) == 8 and versions.current() < versions.v2012:
data = data[2:6] + data[7:]
else:
# we need to rebuild, return None
data = None
return data
def rebuild(self):
"""Rebuild the api cache from scratch
Unlike 'build', this does not attempt to load a cache file, but always
rebuilds it by parsing the docs, etc.
"""
_logger.info("Rebuilding the API Caches...")
# fill out the data structures
self._buildApiTypesList()
#_buildMayaTypesList()
self._buildApiRelationships()
# merge in the manual overrides: we only do this when we're rebuilding or in the pymelControlPanel
_logger.info('merging in dictionary of manual api overrides')
self._mergeClassOverrides()
def _mergeClassOverrides(self, bridgeCache=None):
if bridgeCache is None:
bridgeCache = ApiMelBridgeCache()
bridgeCache.build()
_util.mergeCascadingDicts(bridgeCache.apiClassOverrides, self.apiClassInfo, allowDictToListMerging=True)
def melBridgeContents(self):
return self._mayaApiMelBridge.contents()
def extraDicts(self):
return tuple(getattr(self, x) for x in self.EXTRA_GLOBAL_NAMES)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from ssl import SSLSocket
from ..._async_compat.util import Util
from ..._exceptions import (
BoltError,
BoltProtocolError,
)
from ...api import (
READ_ACCESS,
SYSTEM_DATABASE,
Version,
)
from ...exceptions import (
ConfigurationError,
DatabaseUnavailable,
DriverError,
ForbiddenOnReadOnlyDatabase,
Neo4jError,
NotALeader,
ServiceUnavailable,
)
from ._bolt3 import (
ServerStateManager,
ServerStates,
)
from ._bolt import Bolt
from ._common import (
check_supported_server_product,
CommitResponse,
InitResponse,
Response,
)
log = getLogger("neo4j")
class Bolt4x0(Bolt):
""" Protocol handler for Bolt 4.0.
This is supported by Neo4j versions 4.0, 4.1 and 4.2.
"""
PROTOCOL_VERSION = Version(4, 0)
supports_multiple_results = True
supports_multiple_databases = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._server_state_manager = ServerStateManager(
ServerStates.CONNECTED, on_change=self._on_server_state_change
)
def _on_server_state_change(self, old_state, new_state):
log.debug("[#%04X] State: %s > %s", self.local_port,
old_state.name, new_state.name)
@property
def is_reset(self):
        # We can't be sure of the server's state if there are still pending
        # responses, unless the last message we sent was RESET; in that case
        # the server state will always be READY when we're done.
if (self.responses and self.responses[-1]
and self.responses[-1].message == "reset"):
return True
return self._server_state_manager.state == ServerStates.READY
@property
def encrypted(self):
return isinstance(self.socket, SSLSocket)
@property
def der_encoded_server_certificate(self):
return self.socket.getpeercert(binary_form=True)
@property
def local_port(self):
try:
return self.socket.getsockname()[1]
except OSError:
return 0
def get_base_headers(self):
return {
"user_agent": self.user_agent,
}
def hello(self):
headers = self.get_base_headers()
headers.update(self.auth_dict)
logged_headers = dict(headers)
if "credentials" in logged_headers:
logged_headers["credentials"] = "*******"
log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
self._append(b"\x01", (headers,),
response=InitResponse(self, "hello",
on_success=self.server_info.update))
self.send_all()
self.fetch_all()
check_supported_server_product(self.server_info.agent)
def route(self, database=None, imp_user=None, bookmarks=None):
if imp_user is not None:
raise ConfigurationError(
"Impersonation is not supported in Bolt Protocol {!r}. "
"Trying to impersonate {!r}.".format(
self.PROTOCOL_VERSION, imp_user
)
)
metadata = {}
records = []
if database is None: # default database
self.run(
"CALL dbms.routing.getRoutingTable($context)",
{"context": self.routing_context},
mode="r",
bookmarks=bookmarks,
db=SYSTEM_DATABASE,
on_success=metadata.update
)
else:
self.run(
"CALL dbms.routing.getRoutingTable($context, $database)",
{"context": self.routing_context, "database": database},
mode="r",
bookmarks=bookmarks,
db=SYSTEM_DATABASE,
on_success=metadata.update
)
self.pull(on_success=metadata.update, on_records=records.extend)
self.send_all()
self.fetch_all()
routing_info = [dict(zip(metadata.get("fields", ()), values)) for values in records]
return routing_info
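    # Note (illustrative, shape only): each record is zipped with the returned
    # field names, so routing_info is a list of dicts along the lines of
    #   [{'ttl': 300, 'servers': [{'addresses': [...], 'role': 'WRITE'}, ...]}]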
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None, **handlers):
if imp_user is not None:
raise ConfigurationError(
"Impersonation is not supported in Bolt Protocol {!r}. "
"Trying to impersonate {!r}.".format(
self.PROTOCOL_VERSION, imp_user
)
)
if not parameters:
parameters = {}
extra = {}
if mode in (READ_ACCESS, "r"):
extra["mode"] = "r" # It will default to mode "w" if nothing is specified
if db:
extra["db"] = db
if bookmarks:
try:
extra["bookmarks"] = list(bookmarks)
except TypeError:
raise TypeError("Bookmarks must be provided within an iterable")
if metadata:
try:
extra["tx_metadata"] = dict(metadata)
except TypeError:
raise TypeError("Metadata must be coercible to a dict")
if timeout is not None:
try:
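                # the Bolt protocol expresses transaction timeouts in
                # milliseconds, hence the conversion from seconds below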
extra["tx_timeout"] = int(1000 * float(timeout))
except TypeError:
raise TypeError("Timeout must be specified as a number of seconds")
if extra["tx_timeout"] < 0:
raise ValueError("Timeout must be a positive number or 0.")
fields = (query, parameters, extra)
log.debug("[#%04X] C: RUN %s", self.local_port, " ".join(map(repr, fields)))
if query.upper() == u"COMMIT":
self._append(b"\x10", fields, CommitResponse(self, "run",
**handlers))
else:
self._append(b"\x10", fields, Response(self, "run", **handlers))
def discard(self, n=-1, qid=-1, **handlers):
extra = {"n": n}
if qid != -1:
extra["qid"] = qid
log.debug("[#%04X] C: DISCARD %r", self.local_port, extra)
self._append(b"\x2F", (extra,), Response(self, "discard", **handlers))
def pull(self, n=-1, qid=-1, **handlers):
extra = {"n": n}
if qid != -1:
extra["qid"] = qid
log.debug("[#%04X] C: PULL %r", self.local_port, extra)
self._append(b"\x3F", (extra,), Response(self, "pull", **handlers))
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, **handlers):
if imp_user is not None:
raise ConfigurationError(
"Impersonation is not supported in Bolt Protocol {!r}. "
"Trying to impersonate {!r}.".format(
self.PROTOCOL_VERSION, imp_user
)
)
extra = {}
if mode in (READ_ACCESS, "r"):
extra["mode"] = "r" # It will default to mode "w" if nothing is specified
if db:
extra["db"] = db
if bookmarks:
try:
extra["bookmarks"] = list(bookmarks)
except TypeError:
raise TypeError("Bookmarks must be provided within an iterable")
if metadata:
try:
extra["tx_metadata"] = dict(metadata)
except TypeError:
raise TypeError("Metadata must be coercible to a dict")
if timeout is not None:
try:
extra["tx_timeout"] = int(1000 * float(timeout))
except TypeError:
raise TypeError("Timeout must be specified as a number of seconds")
if extra["tx_timeout"] < 0:
raise ValueError("Timeout must be a positive number or 0.")
log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
self._append(b"\x11", (extra,), Response(self, "begin", **handlers))
def commit(self, **handlers):
log.debug("[#%04X] C: COMMIT", self.local_port)
self._append(b"\x12", (), CommitResponse(self, "commit", **handlers))
def rollback(self, **handlers):
log.debug("[#%04X] C: ROLLBACK", self.local_port)
self._append(b"\x13", (), Response(self, "rollback", **handlers))
def reset(self):
""" Add a RESET message to the outgoing queue, send
it and consume all remaining messages.
"""
def fail(metadata):
raise BoltProtocolError("RESET failed %r" % metadata, self.unresolved_address)
log.debug("[#%04X] C: RESET", self.local_port)
self._append(b"\x0F", response=Response(self, "reset", on_failure=fail))
self.send_all()
self.fetch_all()
def _fetch_message(self):
""" Receive at most one message from the server, if available.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
# Receive exactly one message
details, summary_signature, summary_metadata = \
Util.next(self.inbox)
if details:
log.debug("[#%04X] S: RECORD * %d", self.local_port, len(details)) # Do not log any data
self.responses[0].on_records(details)
if summary_signature is None:
return len(details), 0
response = self.responses.popleft()
response.complete = True
if summary_signature == b"\x70":
log.debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
self._server_state_manager.transition(response.message,
summary_metadata)
response.on_success(summary_metadata or {})
elif summary_signature == b"\x7E":
log.debug("[#%04X] S: IGNORED", self.local_port)
response.on_ignored(summary_metadata or {})
elif summary_signature == b"\x7F":
log.debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
self._server_state_manager.state = ServerStates.FAILED
try:
response.on_failure(summary_metadata or {})
except (ServiceUnavailable, DatabaseUnavailable):
if self.pool:
self.pool.deactivate(address=self.unresolved_address)
raise
except (NotALeader, ForbiddenOnReadOnlyDatabase):
if self.pool:
self.pool.on_write_failure(address=self.unresolved_address)
raise
except Neo4jError as e:
if self.pool and e.invalidates_all_connections():
self.pool.mark_all_stale()
raise
else:
raise BoltProtocolError("Unexpected response message with signature "
"%02X" % ord(summary_signature), self.unresolved_address)
return len(details), 1
def close(self):
""" Close the connection.
"""
if not self._closed:
if not self._defunct:
log.debug("[#%04X] C: GOODBYE", self.local_port)
self._append(b"\x02", ())
try:
self._send_all()
except (OSError, BoltError, DriverError):
pass
log.debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except OSError:
pass
finally:
self._closed = True
def closed(self):
return self._closed
def defunct(self):
return self._defunct
class Bolt4x1(Bolt4x0):
""" Protocol handler for Bolt 4.1.
This is supported by Neo4j versions 4.1 - 4.4.
"""
PROTOCOL_VERSION = Version(4, 1)
def get_base_headers(self):
""" Bolt 4.1 passes the routing context, originally taken from
the URI, into the connection initialisation message. This
enables server-side routing to propagate the same behaviour
through its driver.
"""
headers = {
"user_agent": self.user_agent,
}
if self.routing_context is not None:
headers["routing"] = self.routing_context
return headers
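# A minimal sketch of the effect of get_base_headers() above, assuming a
# hypothetical routing context parsed from a neo4j://host?region=eu URI
# (the user_agent value is a placeholder):
#
# conn.routing_context = {"region": "eu"}
# conn.get_base_headers()
# -> {"user_agent": "neo4j-python/x.y", "routing": {"region": "eu"}}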
class Bolt4x2(Bolt4x1):
""" Protocol handler for Bolt 4.2.
This is supported by Neo4j version 4.2 - 4.4.
"""
PROTOCOL_VERSION = Version(4, 2)
class Bolt4x3(Bolt4x2):
""" Protocol handler for Bolt 4.3.
This is supported by Neo4j version 4.3 - 4.4.
"""
PROTOCOL_VERSION = Version(4, 3)
def route(self, database=None, imp_user=None, bookmarks=None):
if imp_user is not None:
raise ConfigurationError(
"Impersonation is not supported in Bolt Protocol {!r}. "
"Trying to impersonate {!r}.".format(
self.PROTOCOL_VERSION, imp_user
)
)
routing_context = self.routing_context or {}
log.debug("[#%04X] C: ROUTE %r %r %r", self.local_port,
routing_context, bookmarks, database)
metadata = {}
if bookmarks is None:
bookmarks = []
else:
bookmarks = list(bookmarks)
self._append(b"\x66", (routing_context, bookmarks, database),
response=Response(self, "route",
on_success=metadata.update))
self.send_all()
self.fetch_all()
return [metadata.get("rt")]
def hello(self):
def on_success(metadata):
self.configuration_hints.update(metadata.pop("hints", {}))
self.server_info.update(metadata)
if "connection.recv_timeout_seconds" in self.configuration_hints:
recv_timeout = self.configuration_hints[
"connection.recv_timeout_seconds"
]
if isinstance(recv_timeout, int) and recv_timeout > 0:
self.socket.settimeout(recv_timeout)
else:
log.info("[#%04X] Server supplied an invalid value for "
"connection.recv_timeout_seconds (%r). Make sure "
"the server and network is set up correctly.",
self.local_port, recv_timeout)
headers = self.get_base_headers()
headers.update(self.auth_dict)
logged_headers = dict(headers)
if "credentials" in logged_headers:
logged_headers["credentials"] = "*******"
log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
self._append(b"\x01", (headers,),
response=InitResponse(self, "hello",
on_success=on_success))
self.send_all()
self.fetch_all()
check_supported_server_product(self.server_info.agent)
class Bolt4x4(Bolt4x3):
""" Protocol handler for Bolt 4.4.
This is supported by Neo4j version 4.4.
"""
PROTOCOL_VERSION = Version(4, 4)
def route(self, database=None, imp_user=None, bookmarks=None):
routing_context = self.routing_context or {}
db_context = {}
if database is not None:
db_context.update(db=database)
if imp_user is not None:
db_context.update(imp_user=imp_user)
log.debug("[#%04X] C: ROUTE %r %r %r", self.local_port,
routing_context, bookmarks, db_context)
metadata = {}
if bookmarks is None:
bookmarks = []
else:
bookmarks = list(bookmarks)
self._append(b"\x66", (routing_context, bookmarks, db_context),
response=Response(self, "route",
on_success=metadata.update))
self.send_all()
self.fetch_all()
return [metadata.get("rt")]
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None, **handlers):
if not parameters:
parameters = {}
extra = {}
if mode in (READ_ACCESS, "r"):
# It will default to mode "w" if nothing is specified
extra["mode"] = "r"
if db:
extra["db"] = db
if imp_user:
extra["imp_user"] = imp_user
if bookmarks:
try:
extra["bookmarks"] = list(bookmarks)
except TypeError:
raise TypeError("Bookmarks must be provided within an iterable")
if metadata:
try:
extra["tx_metadata"] = dict(metadata)
except TypeError:
raise TypeError("Metadata must be coercible to a dict")
if timeout is not None:
try:
extra["tx_timeout"] = int(1000 * float(timeout))
except TypeError:
raise TypeError("Timeout must be specified as a number of seconds")
if extra["tx_timeout"] < 0:
raise ValueError("Timeout must be a positive number or 0.")
fields = (query, parameters, extra)
log.debug("[#%04X] C: RUN %s", self.local_port,
" ".join(map(repr, fields)))
if query.upper() == u"COMMIT":
self._append(b"\x10", fields, CommitResponse(self, "run",
**handlers))
else:
self._append(b"\x10", fields, Response(self, "run", **handlers))
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, **handlers):
extra = {}
if mode in (READ_ACCESS, "r"):
# It will default to mode "w" if nothing is specified
extra["mode"] = "r"
if db:
extra["db"] = db
if imp_user:
extra["imp_user"] = imp_user
if bookmarks:
try:
extra["bookmarks"] = list(bookmarks)
except TypeError:
raise TypeError("Bookmarks must be provided within an iterable")
if metadata:
try:
extra["tx_metadata"] = dict(metadata)
except TypeError:
raise TypeError("Metadata must be coercible to a dict")
if timeout is not None:
try:
extra["tx_timeout"] = int(1000 * float(timeout))
except TypeError:
raise TypeError("Timeout must be specified as a number of seconds")
if extra["tx_timeout"] < 0:
raise ValueError("Timeout must be a positive number or 0.")
log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
self._append(b"\x11", (extra,), Response(self, "begin", **handlers))
| |
# -*- coding: utf-8 -*-
'''
Return config information
'''
# Import python libs
from __future__ import absolute_import
import re
import os
# Import salt libs
import salt.utils
import salt.syspaths as syspaths
# Import 3rd-party libs
import salt.ext.six as six
# Set up the default values for all systems
DEFAULTS = {'mongo.db': 'salt',
'mongo.host': 'salt',
'mongo.password': '',
'mongo.port': 27017,
'mongo.user': '',
'redis.db': '0',
'redis.host': 'salt',
'redis.port': 6379,
'test.foo': 'unconfigured',
'ca.cert_base_path': '/etc/pki',
'solr.cores': [],
'solr.host': 'localhost',
'solr.port': '8983',
'solr.baseurl': '/solr',
'solr.type': 'master',
'solr.request_timeout': None,
'solr.init_script': '/etc/rc.d/solr',
'solr.dih.import_options': {'clean': False, 'optimize': True,
'commit': True, 'verbose': False},
'solr.backup_path': None,
'solr.num_backups': 1,
'poudriere.config': '/usr/local/etc/poudriere.conf',
'poudriere.config_dir': '/usr/local/etc/poudriere.d',
'ldap.server': 'localhost',
'ldap.port': '389',
'ldap.tls': False,
'ldap.scope': 2,
'ldap.attrs': None,
'ldap.binddn': '',
'ldap.bindpw': '',
'hosts.file': '/etc/hosts',
'aliases.file': '/etc/aliases',
'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, 'salt-images'),
'virt.tunnel': False,
}
def backup_mode(backup=''):
'''
Return the backup mode
CLI Example:
.. code-block:: bash
salt '*' config.backup_mode
'''
if backup:
return backup
return option('backup_mode')
def manage_mode(mode):
'''
Return a mode value, normalized to a string
CLI Example:
.. code-block:: bash
salt '*' config.manage_mode
'''
if mode is None:
return None
ret = str(mode).lstrip('0').zfill(4)
if ret[0] != '0':
return '0{0}'.format(ret)
return ret
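# Worked examples of the normalisation above (derived from the lstrip/zfill
# logic, not from Salt documentation):
#
# manage_mode(644) -> '0644' ('644' zero-filled to four digits)
# manage_mode('0600') -> '0600'
# manage_mode(0) -> '0000' (lstrip leaves '', zfill pads to '0000')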
def valid_fileproto(uri):
'''
Returns a boolean value based on whether or not the URI passed has a valid
remote file protocol designation
CLI Example:
.. code-block:: bash
salt '*' config.valid_fileproto salt://path/to/file
'''
try:
return bool(re.match('^(?:salt|https?|ftp)://', uri))
except Exception:
return False
def option(
value,
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
'''
Pass in a generic option and receive the value that will be assigned
CLI Example:
.. code-block:: bash
salt '*' config.option redis.host
'''
if not omit_opts:
if value in __opts__:
return __opts__[value]
if not omit_master:
if value in __pillar__.get('master', {}):
return __pillar__['master'][value]
if not omit_pillar:
if value in __pillar__:
return __pillar__[value]
if value in DEFAULTS:
return DEFAULTS[value]
return default
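# Lookup-order sketch for option(): minion opts first, then master values
# mirrored into pillar, then pillar itself, then DEFAULTS, then the caller's
# default. Assuming hypothetical data:
#
# __opts__ = {}
# __pillar__ = {'master': {}, 'redis.host': 'cache1'}
# option('redis.host') -> 'cache1' (pillar match wins over DEFAULTS' 'salt')
# option('redis.port') -> 6379 (falls through to DEFAULTS)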
def merge(value,
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
'''
Retrieves an option based on key, merging all matches.
Same as ``option()`` except that it merges all matches, rather than taking
the first match.
CLI Example:
.. code-block:: bash
salt '*' config.merge schedule
'''
ret = None
if not omit_opts:
if value in __opts__:
ret = __opts__[value]
if isinstance(ret, str):
return ret
if not omit_master:
if value in __pillar__.get('master', {}):
tmp = __pillar__['master'][value]
if ret is None:
ret = tmp
if isinstance(ret, str):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
ret = tmp
elif (isinstance(ret, (list, tuple)) and
isinstance(tmp, (list, tuple))):
ret = list(ret) + list(tmp)
if not omit_pillar:
if value in __pillar__:
tmp = __pillar__[value]
if ret is None:
ret = tmp
if isinstance(ret, str):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
ret = tmp
elif (isinstance(ret, (list, tuple)) and
isinstance(tmp, (list, tuple))):
ret = list(ret) + list(tmp)
if ret is None and value in DEFAULTS:
return DEFAULTS[value]
return ret or default
def get(key, default=''):
'''
.. versionadded:: 0.14.0
Attempt to retrieve the named value from opts, grains, pillar, or the
master config; if the named value is not available, return the passed
default. The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
This routine traverses these data stores in this order:
- Local minion config (opts)
- Minion's grains
- Minion's pillar
- Master config
CLI Example:
.. code-block:: bash
salt '*' config.get pkg:apache
'''
ret = salt.utils.traverse_dict_and_list(__opts__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__grains__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__pillar__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-')
if ret != '_|-':
return ret
return default
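# Traversal sketch for get(), assuming hypothetical minion data:
#
# __opts__ = {}
# __grains__ = {'pkg': {'apache': 'httpd'}}
# get('pkg:apache') -> 'httpd' (found in grains; pillar and master unchecked)
# get('pkg:nginx', default='absent') -> 'absent'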
def dot_vals(value):
'''
Pass in a configuration value that should be preceded by the module name
and a dot; this will return a dict of all matching key/value pairs
CLI Example:
.. code-block:: bash
salt '*' config.dot_vals host
'''
ret = {}
for key, val in six.iteritems(__pillar__.get('master', {})):
if key.startswith('{0}.'.format(value)):
ret[key] = val
for key, val in six.iteritems(__opts__):
if key.startswith('{0}.'.format(value)):
ret[key] = val
return ret
| |
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author: Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
from os import path, makedirs
import sys
from qstkutil import DataEvolved as de
from qstkutil import qsdateutil as du
from qstkutil import tsutil as tsu
from qstkutil import fundutil as fu
import numpy as np
from math import log10
import converter
import locale
from pylab import savefig
from matplotlib import pyplot
from matplotlib import gridspec
import matplotlib.dates as mdates
import cPickle
import datetime as dt
import pandas
from copy import deepcopy
def _dividend_rets_funds(df_funds, f_dividend_rets):
df_funds_copy = deepcopy(df_funds)
f_price = deepcopy(df_funds_copy[0])
df_funds_copy.values[1:] = (df_funds_copy.values[1:]/df_funds_copy.values[0:-1])
df_funds_copy.values[0] = 1
df_funds_copy = df_funds_copy + f_dividend_rets
na_funds_copy = np.cumprod(df_funds_copy.values)
na_funds_copy = na_funds_copy*f_price
df_funds = pandas.Series(na_funds_copy, index = df_funds_copy.index)
return df_funds
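# A small numeric sketch of the dividend adjustment above (illustrative
# numbers, not from any dataset): prices [100, 110, 121] become daily
# ratios [1, 1.10, 1.10]; adding a flat 0.01 dividend return per period
# gives [1.01, 1.11, 1.11]; the cumulative product times the first price
# rebuilds an adjusted series of roughly [101.0, 112.1, 124.4].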
def print_header(html_file, name):
"""
@summary prints header of report html file
"""
html_file.write("<HTML>\n")
html_file.write("<HEAD>\n")
html_file.write("<TITLE>QSTK Generated Report:" + name + "</TITLE>\n")
html_file.write("</HEAD>\n\n")
html_file.write("<BODY>\n\n")
def print_footer(html_file):
"""
@summary prints footer of report html file
"""
html_file.write("</BODY>\n\n")
html_file.write("</HTML>")
def get_annual_return(fund_ts, years):
"""
@summary returns a list of annual returns (in percent) for the given fund and years
@param fund_ts: pandas fund time series
@param years: list of years to compute returns for
"""
lf_ret=[]
for year in years:
year_vals = []
for date in fund_ts.index:
if(date.year ==year):
year_vals.append([fund_ts.ix[date]])
day_rets = tsu.daily1(year_vals)
ret = tsu.get_ror_annual(day_rets)
ret=float(ret)
lf_ret.append(ret*100) #" %+8.2f%%" % (ret*100)
return lf_ret
def get_winning_days(fund_ts, years):
"""
@summary returns a formatted string of winning-day percentages for the given fund and years
@param fund_ts: pandas fund time series
@param years: list of years to compute
"""
s_ret=""
for year in years:
year_vals = []
for date in fund_ts.index:
if(date.year==year):
year_vals.append([fund_ts.ix[date]])
ret = fu.get_winning_days(year_vals)
s_ret+=" % + 8.2f%%" % ret
return s_ret
def get_max_draw_down(fund_ts, years):
"""
@summary returns a formatted string of max draw down for the given fund and years
@param fund_ts: pandas fund time series
@param years: list of years to compute
"""
s_ret=""
for year in years:
year_vals = []
for date in fund_ts.index:
if(date.year==year):
year_vals.append(fund_ts.ix[date])
ret = fu.get_max_draw_down(year_vals)
s_ret+=" % + 8.2f%%" % (ret*100)
return s_ret
def get_daily_sharpe(fund_ts, years):
"""
@summary returns a formatted string of Sharpe ratios for the given fund and years
@param fund_ts: pandas fund time series
@param years: list of years to compute
"""
s_ret=""
for year in years:
year_vals = []
for date in fund_ts.index:
if(date.year==year):
year_vals.append([fund_ts.ix[date]])
ret = fu.get_sharpe_ratio(year_vals)
s_ret+=" % + 8.2f " % ret
return s_ret
def get_daily_sortino(fund_ts, years):
"""
@summary returns a formatted string of Sortino ratios for the given fund and years
@param fund_ts: pandas fund time series
@param years: list of years to compute
"""
s_ret=""
for year in years:
year_vals = []
for date in fund_ts.index:
if(date.year==year):
year_vals.append([fund_ts.ix[date]])
ret = fu.get_sortino_ratio(year_vals)
s_ret+=" % + 8.2f " % ret
return s_ret
def get_std_dev(fund_ts):
"""
@summary gets standard deviation of daily returns for a fund, formatted in basis points
@param fund_ts: pandas fund time series
"""
fund_ts=fund_ts.fillna(method='pad')
fund_ts=fund_ts.fillna(method='bfill')
ret=np.std(tsu.daily(fund_ts.values))*10000
return ("%+7.2f bps " % ret)
def print_industry_coer(fund_ts, ostream):
"""
@summary prints correlation and beta with DJ industry indices for a fund
@param fund_ts: pandas fund time series
@param ostream: stream to print to
"""
industries = [['$DJUSBM', 'Materials'],
['$DJUSNC', 'Goods'],
['$DJUSCY', 'Services'],
['$DJUSFN', 'Financials'],
['$DJUSHC', 'Health'],
['$DJUSIN', 'Industrial'],
['$DJUSEN', 'Oil & Gas'],
['$DJUSTC', 'Technology'],
['$DJUSTL', 'TeleComm'],
['$DJUSUT', 'Utilities']]
for i in range(0, len(industries) ):
if(i%2==0):
ostream.write("\n")
#load data
norObj = de.DataAccess('mysql')
ldtTimestamps = du.getNYSEdays( fund_ts.index[0], fund_ts.index[-1], dt.timedelta(hours=16) )
ldfData = norObj.get_data( ldtTimestamps, [industries[i][0]], ['close'] )
#get correlation
ldfData[0]=ldfData[0].fillna(method='pad')
ldfData[0]=ldfData[0].fillna(method='bfill')
a=np.corrcoef(np.ravel(tsu.daily(ldfData[0][industries[i][0]])),np.ravel(tsu.daily(fund_ts.values)))
b=np.ravel(tsu.daily(ldfData[0][industries[i][0]]))
f=np.ravel(tsu.daily(fund_ts))
fBeta, unused = np.polyfit(b,f,1)
ostream.write("%10s(%s):%+6.2f, %+6.2f " % (industries[i][1], industries[i][0], a[0,1], fBeta))
def print_other_coer(fund_ts, ostream):
"""
@summary prints correlation and beta with other major indices for a fund
@param fund_ts: pandas fund time series
@param ostream: stream to print to
"""
industries = [['$SPX', ' S&P Index'],
['$DJI', ' Dow Jones'],
['$DJUSEN', 'Oil & Gas'],
['$DJGSP', ' Metals']]
for i in range(0, len(industries) ):
if(i%2==0):
ostream.write("\n")
#load data
norObj =de.DataAccess('mysql')
ldtTimestamps = du.getNYSEdays( fund_ts.index[0], fund_ts.index[-1], dt.timedelta(hours=16) )
ldfData = norObj.get_data( ldtTimestamps, [industries[i][0]], ['close'] )
#get correlation
ldfData[0]=ldfData[0].fillna(method='pad')
ldfData[0]=ldfData[0].fillna(method='bfill')
a=np.corrcoef(np.ravel(tsu.daily(ldfData[0][industries[i][0]])),np.ravel(tsu.daily(fund_ts.values)))
b=np.ravel(tsu.daily(ldfData[0][industries[i][0]]))
f=np.ravel(tsu.daily(fund_ts))
fBeta, unused = np.polyfit(b,f,1)
ostream.write("%10s(%s):%+6.2f, %+6.2f " % (industries[i][1], industries[i][0], a[0,1], fBeta))
def print_benchmark_coer(fund_ts, benchmark_close, sym, ostream):
"""
@summary prints correlation and beta against a benchmark for a fund
@param fund_ts: pandas fund time series
@param benchmark_close: pandas time series of benchmark closing prices
@param sym: benchmark symbol used to label the output
@param ostream: stream to print to
"""
fund_ts=fund_ts.fillna(method='pad')
fund_ts=fund_ts.fillna(method='bfill')
benchmark_close=benchmark_close.fillna(method='pad')
benchmark_close=benchmark_close.fillna(method='bfill')
faCorr = np.corrcoef(np.ravel(tsu.daily(fund_ts.values)), np.ravel(tsu.daily(benchmark_close)))
b = np.ravel(tsu.daily(benchmark_close))
f = np.ravel(tsu.daily(fund_ts))
fBeta, unused = np.polyfit(b, f, 1)
print_line(sym+" Correlation", "%+6.2f" % faCorr[0, 1], i_spacing=3, ostream=ostream)
print_line(sym+" Beta", "%+6.2f" % fBeta, i_spacing=3, ostream=ostream)
def print_monthly_returns(fund_ts, years, ostream):
"""
@summary prints monthly returns for given fund and years to the given stream
@param fund_ts: pandas fund time series
@param years: list of years to print out
@param ostream: stream to print to
"""
ostream.write(" ")
month_names = du.getMonthNames()
for name in month_names:
ostream.write(" " + str(name))
ostream.write("\n")
i = 0
mrets = tsu.monthly(fund_ts)
for year in years:
ostream.write(str(year))
months = du.getMonths(fund_ts, year)
for k in range(1, months[0]):
ostream.write(" ")
for month in months:
ostream.write(" % + 6.2f" % (mrets[i]*100))
i += 1
ostream.write("\n")
def print_years(years, ostream):
ostream.write("\n\n ")
for year in years:
ostream.write(" " + str(year))
ostream.write("\n ")
for year in years:
ostream.write(" " + '------')
ostream.write("\n")
def print_line(s_left_side, s_right_side, i_spacing=0, ostream="stdout"):
ostream.write("%35s:%s%30s\n" % (s_left_side, " "*i_spacing, s_right_side))
def print_stats(fund_ts, benchmark, name, lf_dividend_rets=0.0, original="",s_fund_name="Fund",
s_original_name="Original", d_trading_params="", d_hedge_params="", s_comments="", directory = False,
leverage = False, commissions = 0, slippage = 0, borrowcost = 0, ostream = sys.stdout, i_start_cash=1000000):
"""
@summary prints stats of a provided fund and benchmark
@param fund_ts: fund value in pandas timeseries
@param benchmark: benchmark symbol to compare fund to
@param name: name to associate with the fund in the report
@param directory: parameter to specify printing to a directory
@param leverage: time series to plot with report
@param commissions: value to print with report
@param slippage: value to print with report
@param ostream: stream to print stats to, defaults to stdout
"""
#Set locale for currency conversions
locale.setlocale(locale.LC_ALL, '')
#make names length independent for alignment
s_formatted_original_name="%15s" % s_original_name
s_formatted_fund_name = "%15s" % s_fund_name
fund_ts=fund_ts.fillna(method='pad')
fund_ts=fund_ts.fillna(method='bfill')
fund_ts=fund_ts.fillna(1.0)
if directory != False :
if not path.exists(directory):
makedirs(directory)
sfile = path.join(directory, "report-%s.html" % name )
splot = "plot-%s.png" % name
splot_dir = path.join(directory, splot)
ostream = open(sfile, "wb")
ostream.write("<pre>")
print "writing to ", sfile
if type(original)==type("str"):
if type(leverage)!=type(False):
print_plot(fund_ts, benchmark, name, splot_dir, lf_dividend_rets, leverage=leverage, i_start_cash= i_start_cash)
else:
print_plot(fund_ts, benchmark, name, splot_dir, lf_dividend_rets, i_start_cash= i_start_cash)
else:
if type(leverage)!=type(False):
print_plot([fund_ts, original], benchmark, name, splot_dir, s_original_name, lf_dividend_rets, leverage=leverage, i_start_cash= i_start_cash)
else:
print_plot([fund_ts, original], benchmark, name, splot_dir, s_original_name, lf_dividend_rets, i_start_cash = i_start_cash)
start_date = fund_ts.index[0].strftime("%m/%d/%Y")
end_date = fund_ts.index[-1].strftime("%m/%d/%Y")
ostream.write("Performance Summary for "\
+ str(path.basename(name)) + " Backtest\n")
ostream.write("For the dates " + str(start_date) + " to "\
+ str(end_date) + "")
#parameter section
if d_trading_params!="":
ostream.write("\n\nTrading Parameters\n\n")
for var in d_trading_params:
print_line(var, d_trading_params[var], ostream=ostream)
if d_hedge_params!="":
ostream.write("\nHedging Parameters\n\n")
if isinstance(d_hedge_params['Weight of Hedge'], float):
d_hedge_params['Weight of Hedge'] = str(int(d_hedge_params['Weight of Hedge']*100)) + '%'
for var in d_hedge_params:
print_line(var, d_hedge_params[var], ostream=ostream)
#comment section
if s_comments!="":
ostream.write("\nComments\n\n%s" % s_comments)
if directory != False :
ostream.write("\n\n<img src="+splot+" width=600 />\n\n")
mult = i_start_cash/fund_ts.values[0]
timeofday = dt.timedelta(hours = 16)
timestamps = du.getNYSEdays(fund_ts.index[0], fund_ts.index[-1], timeofday)
dataobj =de.DataAccess('mysql')
years = du.getYears(fund_ts)
benchmark_close = dataobj.get_data(timestamps, benchmark, ["close"], \
verbose = False)[0]
for bench_sym in benchmark:
benchmark_close[bench_sym]=benchmark_close[bench_sym].fillna(method='pad')
benchmark_close[bench_sym]=benchmark_close[bench_sym].fillna(method='bfill')
benchmark_close[bench_sym]=benchmark_close[bench_sym].fillna(1.0)
if type(lf_dividend_rets) != type(0.0):
for i,sym in enumerate(benchmark):
benchmark_close[sym] = _dividend_rets_funds(benchmark_close[sym], lf_dividend_rets[i])
ostream.write("Resulting Values in $ with an initial investment of "+ locale.currency(int(round(i_start_cash)), grouping=True) + "\n")
print_line(s_formatted_fund_name+" Resulting Value",(locale.currency(int(round(fund_ts.values[-1]*mult)), grouping=True)),i_spacing=3, ostream=ostream)
if type(original)!=type("str"):
mult3 = i_start_cash / original.values[0]
print_line(s_formatted_original_name +" Resulting Value",(locale.currency(int(round(original.values[-1]*mult3)), grouping=True)),i_spacing=3, ostream=ostream)
for bench_sym in benchmark:
mult2= i_start_cash / benchmark_close[bench_sym].values[0]
print_line(bench_sym+" Resulting Value",locale.currency(int(round(benchmark_close[bench_sym].values[-1]*mult2)), grouping=True),i_spacing=3, ostream=ostream)
ostream.write("\n")
if len(years) > 1:
print_line(s_formatted_fund_name+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(fund_ts.values)[0],i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(original.values)[0],i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(benchmark_close[bench_sym].values)[0],i_spacing=4,ostream=ostream)
ostream.write("\n")
ostream.write("Transaction Costs\n")
print_line("Total Commissions"," %15s, %10.2f%%" % (locale.currency(int(round(commissions)), grouping=True), \
float((round(commissions)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Slippage"," %15s, %10.2f%%" % (locale.currency(int(round(slippage)), grouping=True), \
float((round(slippage)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Short Borrowing Cost"," %15s, %10.2f%%" % (locale.currency(int(round(borrowcost)), grouping=True), \
float((round(borrowcost)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Costs"," %15s, %10.2f%%" % (locale.currency(int(round(borrowcost+slippage+commissions)), grouping=True), \
float((round(borrowcost+slippage+commissions)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
ostream.write("\n")
print_line(s_formatted_fund_name+" Std Dev of Returns",get_std_dev(fund_ts),i_spacing=8, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Std Dev of Returns", get_std_dev(original), i_spacing=8, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Std Dev of Returns", get_std_dev(benchmark_close[bench_sym]), i_spacing=8, ostream=ostream)
ostream.write("\n")
for bench_sym in benchmark:
print_benchmark_coer(fund_ts, benchmark_close[bench_sym], str(bench_sym), ostream)
ostream.write("\n")
ostream.write("\nYearly Performance Metrics")
print_years(years, ostream)
s_line=""
for f_token in get_annual_return(fund_ts, years):
s_line+=" %+8.2f%%" % f_token
print_line(s_formatted_fund_name+" Annualized Return",s_line, i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
s_line=""
for f_token in get_annual_return(original, years):
s_line+=" %+8.2f%%" % f_token
print_line(s_formatted_original_name+" Annualized Return", s_line, i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
s_line=""
for f_token in get_annual_return(benchmark_close[bench_sym], years):
s_line+=" %+8.2f%%" % f_token
print_line(bench_sym+" Annualized Return", s_line, i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Winning Days",get_winning_days(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Winning Days",get_winning_days(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Winning Days",get_winning_days(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Max Draw Down",get_max_draw_down(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Max Draw Down",get_max_draw_down(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Max Draw Down",get_max_draw_down(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Daily Sharpe Ratio",get_daily_sharpe(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Daily Sharpe Ratio",get_daily_sharpe(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Daily Sharpe Ratio",get_daily_sharpe(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Daily Sortino Ratio",get_daily_sortino(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Daily Sortino Ratio",get_daily_sortino(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Daily Sortino Ratio",get_daily_sortino(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
ostream.write("\n\n\nCorrelation and Beta with DJ Industries for the Fund ")
print_industry_coer(fund_ts,ostream)
ostream.write("\n\nCorrelation and Beta with Other Indices for the Fund ")
print_other_coer(fund_ts,ostream)
ostream.write("\n\n\nMonthly Returns for the Fund %\n")
print_monthly_returns(fund_ts, years, ostream)
if directory != False:
ostream.write("</pre>")
def print_html(fund_ts, benchmark, name, lf_dividend_rets=0.0, original="", s_fund_name="Fund",
s_original_name="Original", d_trading_params="", d_hedge_params="", s_comments="", directory=False,
leverage=False, commissions=0, slippage=0, borrowcost=0, ostream=sys.stdout, i_start_cash=1000000):
"""
@summary prints stats of a provided fund and benchmark
@param fund_ts: fund value in pandas timeseries
@param benchmark: benchmark symbol to compare fund to
@param name: name to associate with the fund in the report
@param directory: parameter to specify printing to a directory
@param leverage: time series to plot with report
@param commissions: value to print with report
@param slippage: value to print with report
@param ostream: stream to print stats to, defaults to stdout
"""
#Set locale for currency conversions
locale.setlocale(locale.LC_ALL, '')
#make names length independent for alignment
s_formatted_original_name="%15s" % s_original_name
s_formatted_fund_name = "%15s" % s_fund_name
fund_ts=fund_ts.fillna(method='pad')
if directory != False :
if not path.exists(directory):
makedirs(directory)
sfile = path.join(directory, "report-%s.html" % name )
splot = "plot-%s.png" % name
splot_dir = path.join(directory, splot)
ostream = open(sfile, "wb")
print "writing to ", sfile
if type(original)==type("str"):
if type(leverage)!=type(False):
print_plot(fund_ts, benchmark, name, splot_dir, lf_dividend_rets, leverage=leverage, i_start_cash = i_start_cash)
else:
print_plot(fund_ts, benchmark, name, splot_dir, lf_dividend_rets, i_start_cash = i_start_cash)
else:
if type(leverage)!=type(False):
print_plot([fund_ts, original], benchmark, name, splot_dir, s_original_name, lf_dividend_rets, leverage=leverage, i_start_cash = i_start_cash)
else:
print_plot([fund_ts, original], benchmark, name, splot_dir, s_original_name, lf_dividend_rets, i_start_cash = i_start_cash)
print_header(ostream,name)
start_date = fund_ts.index[0].strftime("%m/%d/%Y")
end_date = fund_ts.index[-1].strftime("%m/%d/%Y")
ostream.write("Performance Summary for "\
+ str(path.basename(name)) + " Backtest\n")
ostream.write("For the dates " + str(start_date) + " to "\
+ str(end_date) + "")
#parameter section
if d_trading_params!="":
ostream.write("\n\nTrading Parameters\n\n")
for var in d_trading_params:
print_line(var, d_trading_params[var], ostream=ostream)
if d_hedge_params!="":
ostream.write("\nHedging Parameters\n\n")
if isinstance(d_hedge_params['Weight of Hedge'], float):
d_hedge_params['Weight of Hedge'] = str(int(d_hedge_params['Weight of Hedge']*100)) + '%'
for var in d_hedge_params:
print_line(var, d_hedge_params[var], ostream=ostream)
#comment section
if s_comments!="":
ostream.write("\nComments\n\n%s" % s_comments)
if directory != False :
ostream.write("\n\n<img src="+splot+" width=600 />\n\n")
mult = i_start_cash/fund_ts.values[0]
timeofday = dt.timedelta(hours = 16)
timestamps = du.getNYSEdays(fund_ts.index[0], fund_ts.index[-1], timeofday)
dataobj =de.DataAccess('mysql')
years = du.getYears(fund_ts)
benchmark_close = dataobj.get_data(timestamps, benchmark, ["close"])
benchmark_close=benchmark_close[0]
for bench_sym in benchmark:
benchmark_close[bench_sym]=benchmark_close[bench_sym].fillna(method='pad')
if type(lf_dividend_rets) != type(0.0):
for i,sym in enumerate(benchmark):
benchmark_close[sym] = _dividend_rets_funds(benchmark_close[sym], lf_dividend_rets[i])
ostream.write("Resulting Values in $ with an initial investment of "+ locale.currency(int(round(i_start_cash)), grouping=True) + "\n")
print_line(s_formatted_fund_name+" Resulting Value",(locale.currency(int(round(fund_ts.values[-1]*mult)), grouping=True)),i_spacing=3, ostream=ostream)
if type(original)!=type("str"):
mult3 = i_start_cash / original.values[0]
print_line(s_formatted_original_name +" Resulting Value",(locale.currency(int(round(original.values[-1]*mult3)), grouping=True)),i_spacing=3, ostream=ostream)
for bench_sym in benchmark:
mult2=i_start_cash/benchmark_close[bench_sym].values[0]
print_line(bench_sym+" Resulting Value",locale.currency(int(round(benchmark_close[bench_sym].values[-1]*mult2)), grouping=True),i_spacing=3, ostream=ostream)
ostream.write("\n")
if len(years) > 1:
print_line(s_formatted_fund_name+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(fund_ts.values)[0],i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(original.values)[0],i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Sharpe Ratio","%10.3f" % fu.get_sharpe_ratio(benchmark_close[bench_sym].values)[0],i_spacing=4,ostream=ostream)
ostream.write("\n")
ostream.write("Transaction Costs\n")
print_line("Total Commissions"," %15s, %10.2f%%" % (locale.currency(int(round(commissions)), grouping=True), \
float((round(commissions)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Slippage"," %15s, %10.2f%%" % (locale.currency(int(round(slippage)), grouping=True), \
float((round(slippage)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Short Borrowing Cost"," %15s, %10.2f%%" % (locale.currency(int(round(borrowcost)), grouping=True), \
float((round(borrowcost)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
print_line("Total Costs"," %15s, %10.2f%%" % (locale.currency(int(round(borrowcost+slippage+commissions)), grouping=True), \
float((round(borrowcost+slippage+commissions)*100)/(fund_ts.values[-1]*mult))), i_spacing=4, ostream=ostream)
ostream.write("\n")
print_line(s_formatted_fund_name+" Std Dev of Returns",get_std_dev(fund_ts),i_spacing=8, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Std Dev of Returns", get_std_dev(original), i_spacing=8, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Std Dev of Returns", get_std_dev(benchmark_close[bench_sym]), i_spacing=8, ostream=ostream)
ostream.write("\n")
for bench_sym in benchmark:
print_benchmark_coer(fund_ts, benchmark_close[bench_sym], str(bench_sym), ostream)
ostream.write("\n")
ostream.write("\nYearly Performance Metrics")
print_years(years, ostream)
s_line=""
for f_token in get_annual_return(fund_ts, years):
s_line+=" %+8.2f%%" % f_token
print_line(s_formatted_fund_name+" Annualized Return", s_line, i_spacing=4, ostream=ostream)
lf_vals=[get_annual_return(fund_ts, years)]
ls_labels=[name]
if type(original)!=type("str"):
s_line=""
for f_token in get_annual_return(original, years):
s_line+=" %+8.2f%%" % f_token
print_line(s_formatted_original_name+" Annualized Return", s_line, i_spacing=4, ostream=ostream)
lf_vals.append(get_annual_return(original, years))
ls_labels.append(s_original_name)
for bench_sym in benchmark:
s_line=""
for f_token in get_annual_return(benchmark_close[bench_sym], years):
s_line+=" %+8.2f%%" % f_token
print_line(bench_sym+" Annualized Return", s_line, i_spacing=4, ostream=ostream)
lf_vals.append(get_annual_return(benchmark_close[bench_sym], years))
ls_labels.append(bench_sym)
ls_year_labels=[]
for i in range(0,len(years)):
ls_year_labels.append(str(years[i]))
print_bar_chart(lf_vals, ls_labels, ls_year_labels, directory+"/annual_rets.png")
print_years(years, ostream)
print_line(s_formatted_fund_name+" Winning Days",get_winning_days(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Winning Days",get_winning_days(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Winning Days",get_winning_days(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Max Draw Down",get_max_draw_down(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Max Draw Down",get_max_draw_down(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Max Draw Down",get_max_draw_down(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Daily Sharpe Ratio",get_daily_sharpe(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Daily Sharpe Ratio",get_daily_sharpe(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Daily Sharpe Ratio",get_daily_sharpe(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
print_years(years, ostream)
print_line(s_formatted_fund_name+" Daily Sortino Ratio",get_daily_sortino(fund_ts, years), i_spacing=4, ostream=ostream)
if type(original)!=type("str"):
print_line(s_formatted_original_name+" Daily Sortino Ratio",get_daily_sortino(original, years), i_spacing=4, ostream=ostream)
for bench_sym in benchmark:
print_line(bench_sym+" Daily Sortino Ratio",get_daily_sortino(benchmark_close[bench_sym], years), i_spacing=4, ostream=ostream)
ostream.write("\n\n\nCorrelation and Beta with DJ Industries for the Fund ")
print_industry_coer(fund_ts,ostream)
ostream.write("\n\nCorrelation and Beta with Other Indices for the Fund ")
print_other_coer(fund_ts,ostream)
ostream.write("\n\n\nMonthly Returns for the Fund %\n")
print_monthly_returns(fund_ts, years, ostream)
print_footer(ostream)
def print_bar_chart(llf_vals, ls_fund_labels, ls_year_labels, s_filename):
amin=min(min(llf_vals))
min_lim=0
if amin<0:
min_lim = amin
pyplot.clf()
ind = np.arange(len(ls_year_labels))
ind=ind*2
width = 0.35
fig = pyplot.figure()
ax = fig.add_subplot(111)
colors=('r','g','b')
rects=[]
for i in range(0,len(llf_vals)):
rects.append( ax.bar(ind+width*i, llf_vals[i], width, color=colors[i]))
ax.set_ylabel('Annual Return')
ax.set_ylim(min_lim, max(max(llf_vals))*1.1)
ax.set_title('Annual Return by Fund and Year')
ax.set_xticks(ind+width*len(llf_vals)/2)
ax.set_xticklabels(ls_year_labels)
plots=[]
for i in range(0,len(llf_vals)):
plots.append(rects[i][0])
ax.legend(plots,ls_fund_labels)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
for i in range(0,len(llf_vals)):
autolabel(rects[i])
savefig(s_filename, format = 'png')
def print_plot(fund, benchmark, graph_name, filename, s_original_name="", lf_dividend_rets=0.0, leverage=False, i_start_cash = 1000000):
"""
@summary prints a plot of a provided fund and benchmark
@param fund: fund value in pandas timeseries
@param benchmark: benchmark symbol to compare fund to
@param graph_name: name to associate with the fund in the report
@param filename: file location to store the plot
"""
pyplot.clf()
if type(leverage)!=type(False):
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
pyplot.subplot(gs[0])
start_date = 0
end_date = 0
if(type(fund)!= type(list())):
if(start_date == 0 or start_date>fund.index[0]):
start_date = fund.index[0]
if(end_date == 0 or end_date<fund.index[-1]):
end_date = fund.index[-1]
mult = i_start_cash/fund.values[0]
pyplot.plot(fund.index, fund.values * mult, label = \
path.basename(graph_name))
else:
i=0
for entity in fund:
if(start_date == 0 or start_date>entity.index[0]):
start_date = entity.index[0]
if(end_date == 0 or end_date<entity.index[-1]):
end_date = entity.index[-1]
mult = i_start_cash/entity.values[0]
if i == 1 and len(fund)!=1:
pyplot.plot(entity.index, entity.values * mult, label = \
s_original_name)
else:
pyplot.plot(entity.index, entity.values * mult, label = \
path.basename(graph_name))
i=i+1
timeofday = dt.timedelta(hours = 16)
timestamps = du.getNYSEdays(start_date, end_date, timeofday)
dataobj = de.DataAccess('mysql')
benchmark_close = dataobj.get_data(timestamps, benchmark, ["close"])
benchmark_close = benchmark_close[0]
benchmark_close = benchmark_close.fillna(method='pad')
benchmark_close = benchmark_close.fillna(method='bfill')
benchmark_close = benchmark_close.fillna(1.0)
if type(lf_dividend_rets) != type(0.0):
for i,sym in enumerate(benchmark):
benchmark_close[sym] = _dividend_rets_funds(benchmark_close[sym], lf_dividend_rets[i])
for sym in benchmark:
mult = i_start_cash / benchmark_close[sym].values[0]
pyplot.plot(benchmark_close[sym].index, \
benchmark_close[sym].values*mult, label = sym)
pyplot.gcf().autofmt_xdate()
pyplot.gca().fmt_xdata = mdates.DateFormatter('%m-%d-%Y')
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %d %Y'))
pyplot.xlabel('Date')
pyplot.ylabel('Fund Value')
pyplot.legend(loc = "best")
if type(leverage)!=type(False):
pyplot.subplot(gs[1])
pyplot.plot(leverage.index, leverage.values, label="Leverage")
pyplot.gcf().autofmt_xdate()
pyplot.gca().fmt_xdata = mdates.DateFormatter('%m-%d-%Y')
pyplot.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %d %Y'))
labels=[]
max_label=max(leverage.values)
min_label=min(leverage.values)
rounder= -1*(round(log10(max_label))-1)
labels.append(round(min_label*0.9, int(rounder)))
labels.append(round((max_label+min_label)/2, int(rounder)))
labels.append(round(max_label*1.1, int(rounder)))
pyplot.yticks(labels)
pyplot.legend(loc = "best")
pyplot.title(graph_name + " Leverage")
pyplot.xlabel('Date')
pyplot.legend()
savefig(filename, format = 'png')
def generate_report(funds_list, graph_names, out_file, i_start_cash = 10000):
"""
@summary generates a report given a list of fund time series
"""
html_file = open("report.html","w")
print_header(html_file, out_file)
html_file.write("<IMG SRC = \'./funds.png\' width = 400/>\n")
html_file.write("<BR/>\n\n")
i = 0
pyplot.clf()
#load spx for time frame
symbol = ["$SPX"]
start_date = 0
end_date = 0
for fund in funds_list:
if(type(fund)!= type(list())):
if(start_date == 0 or start_date>fund.index[0]):
start_date = fund.index[0]
if(end_date == 0 or end_date<fund.index[-1]):
end_date = fund.index[-1]
mult = i_start_cash/fund.values[0]
pyplot.plot(fund.index, fund.values * mult, label = \
path.basename(graph_names[i]))
else:
if(start_date == 0 or start_date>fund[0].index[0]):
start_date = fund[0].index[0]
if(end_date == 0 or end_date<fund[0].index[-1]):
end_date = fund[0].index[-1]
mult = i_start_cash/fund[0].values[0]
pyplot.plot(fund[0].index, fund[0].values * mult, label = \
path.basename(graph_names[i]))
i += 1
timeofday = dt.timedelta(hours = 16)
timestamps = du.getNYSEdays(start_date, end_date, timeofday)
dataobj = de.DataAccess('mysql')
benchmark_close = dataobj.get_data(timestamps, symbol, ["close"], \
verbose = False)[0]
mult = i_start_cash/benchmark_close.values[0]
i = 0
for fund in funds_list:
if(type(fund)!= type(list())):
print_stats(fund, ["$SPX"], graph_names[i])
else:
print_stats( fund[0], ["$SPX"], graph_names[i])
i += 1
pyplot.plot(benchmark_close.index, \
benchmark_close.values*mult, label = "$SPX")
pyplot.ylabel('Fund Value')
pyplot.xlabel('Date')
pyplot.legend()
savefig('funds.png', format = 'png')
print_footer(html_file)
def generate_robust_report(fund_matrix, out_file):
"""
@summary generates a report using robust backtesting
@param fund_matrix: a pandas matrix of fund time series
@param out_file: filename where to print report
"""
html_file = open(out_file,"w")
print_header(html_file, out_file)
converter.fundsToPNG(fund_matrix,'funds.png')
html_file.write("<H2>QSTK Generated Report:" + out_file + "</H2>\n")
html_file.write("<IMG SRC = \'./funds.png\'/>\n")
html_file.write("<IMG SRC = \'./analysis.png\'/>\n")
html_file.write("<BR/>\n\n")
print_stats(fund_matrix, "robust funds", html_file)
print_footer(html_file)
if __name__ == '__main__':
# Usage
#
# Normal:
# python report.py 'out.pkl' ['out2.pkl' ...]
#
# Robust:
# python report.py -r 'out.pkl'
#
ROBUST = 0
if(sys.argv[1] == '-r'):
ROBUST = 1
FILENAME = "report.html"
if(ROBUST == 1):
ANINPUT = open(sys.argv[2],"r")
FUNDS = cPickle.load(ANINPUT)
generate_robust_report(FUNDS, FILENAME)
else:
FILES = sys.argv
FILES.remove(FILES[0])
FUNDS = []
for AFILE in FILES:
ANINPUT = open(AFILE,"r")
FUND = cPickle.load(ANINPUT)
FUNDS.append(FUND)
generate_report(FUNDS, FILES, FILENAME)
| |
import unittest
import tempfile
from nineml.document import Document
from nineml.utils.comprehensive_example import (
instances_of_all_types, doc1, conPropB1)
from nineml.exceptions import (NineMLNameError, NineMLUsageError)
# from tempfile import mkdtemp
import os.path
from nineml.abstraction.dynamics import Trigger
import nineml
import nineml.units as un
from nineml.user import (
DynamicsProperties, ConnectionRuleProperties, Definition)
from nineml.abstraction import Parameter, Dynamics, Regime
from nineml.abstraction.connectionrule import random_fan_in_connection_rule
class TestDocumentExceptions(unittest.TestCase):
# def setUp(self):
# self._temp_dir = mkdtemp()
#
# def tearDown(self):
# shutil.rmtree(self._temp_dir)
#
# def test_read_xml_ninemlruntimeerror(self):
# """
# line #: 582
# message: Could not read 9ML URL '{}': {}
# """
#
# self.assertRaises(
# NineMLUsageError,
# read_xml,
# url='http://this_is_a_bad_url.html',
# relative_to='/a_file.xml')
#
# def test_read_xml_ninemlruntimeerror2(self):
# """
# line #: 587
# message: Could not parse XML of 9ML file '{}': {}
# """
# bad_xml_path = os.path.join(self._temp_dir, 'bad_xml.xml')
# with open(bad_xml_path, 'w') as f:
# f.write("this file doesn't contain xml")
# self.assertRaises(
# NineMLUsageError,
# read_xml,
# url=bad_xml_path,
# relative_to='/a_file.xml')
#
# def test_get_component_class_type_ninemlxmlerror(self):
# """
# line #: 607
# message: No type defining block in ComponentClass
# """
# elem = Ev1.ComponentClass(name="a")
# self.assertRaises(
# NineMLXMLError,
# get_component_class_type,
# elem=elem)
def test_add_ninemlruntimeerror(self):
"""
line #: 66
message: Could not add {} to document '{}' as it is not a 'document
level NineML object' ('{}')
"""
self.assertRaises(
NineMLUsageError,
doc1.add,
nineml_obj=Trigger('a > b'))
def test_add_ninemlnameerror(self):
"""
line #: 75
message: Could not add element '{}' as an element with that name
already exists in the document '{}'
"""
dynB = instances_of_all_types['Dynamics']['dynA'].clone()
dynB._name = 'dynB'
self.assertRaises(
NineMLNameError,
doc1.add,
nineml_obj=dynB)
# def test_add_ninemlruntimeerror2(self):
# """
# line #: 84
# message: Attempting to add the same object '{}' {} to '{}'
# document when it is already in '{}'. Please remove it from the original
# document first
# """
#
# doc1 = instances_of_all_types[Document.nineml_type]['doc1']
# doc2 = Document()
# self.assertRaises(
# NineMLUsageError,
# doc2.add,
# element=doc1['dynA'])
def test_remove_ninemlruntimeerror(self):
"""
line #: 96
message: Could not remove {} from document as it is not a document
level NineML object ('{}')
"""
self.assertRaises(
NineMLUsageError,
doc1.remove,
nineml_obj=Trigger('a > b'))
def test_remove_ninemlnameerror(self):
"""
line #: 103
message: Could not find '{}' element to remove from document '{}'
"""
conPropZZ = ConnectionRuleProperties(name='ZZ', definition=conPropB1)
self.assertRaises(
NineMLNameError,
doc1.remove,
nineml_obj=conPropZZ,
ignore_missing=False)
def test___getitem___ninemlnameerror(self):
"""
line #: 137
message: '{}' was not found in the NineML document {} (elements in the
document were '{}').
"""
self.assertRaises(
NineMLNameError,
doc1.__getitem__,
name='ZZ')
#
# def test__load_elem_from_xml_ninemlruntimeerror(self):
# """
# line #: 217
# message: Circular reference detected in '{}(name={})' element.
# Resolution stack was:
# """
# xml = Ev2(Document.nineml_type,
# Ev2(DynamicsProperties.nineml_type,
# Ev2(Definition.nineml_type, name="B"),
# name="A"),
# Ev2(DynamicsProperties.nineml_type,
# Ev2(Definition.nineml_type, name="A"),
# name="B"))
# document = Uxml(root=xml).unserialize()
# self.assertRaises(
# NineMLUsageError,
# document._load_elem_from_xml,
# unloaded=super(Document, document).__getitem__('A'))
def test_standardize_units_ninemlruntimeerror(self):
"""
line #: 257
message: Name of unit '{}' conflicts with existing object of differring
value or type '{}' and '{}'
"""
a = ConnectionRuleProperties(
name='A',
definition=random_fan_in_connection_rule,
properties={'number': (
1.0 * un.Unit(dimension=un.dimensionless, power=0,
name='U'))})
b = ConnectionRuleProperties(
name='B',
definition=random_fan_in_connection_rule,
properties={'number': (
1.0 * un.Unit(dimension=un.dimensionless, power=1,
name='U'))})
self.assertRaises(
NineMLUsageError,
Document, a, b)
def test_standardize_units_ninemlruntimeerror2(self):
"""
line #: 268
message: Name of dimension '{}' conflicts with existing object of
differring value or type '{}' and '{}'
"""
a = Dynamics(
name='A',
parameters=[
Parameter('P1', dimension=un.Dimension(name='D', t=1))],
regime=Regime(name='default'),
aliases=['A1 := P1 * 2'])
b = Dynamics(
name='B',
parameters=[
Parameter('P1', dimension=un.Dimension(name='D', l=1))],
regime=Regime(name='default'),
aliases=['A1 := P1 * 2'])
self.assertRaises(
NineMLUsageError,
Document, a, b)
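# The two cases above exercise the same de-duplication rule: when a Document
# is assembled, units and dimensions are standardised by name, so two objects
# both named 'U' (or 'D') with differing definitions cannot coexist and
# construction raises NineMLUsageError.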
#
# def test_from_xml_ninemlxmlerror(self):
# """
# line #: 312
# message: Unrecognised XML namespace '{}', can be one of '{}'
# """
# bad_E = ElementMaker(namespace='http://bad_namespace.net')
# self.assertRaises(
# NineMLXMLError,
# Document.from_xml,
# element=bad_E.AnElement())
#
# def test_from_xml_ninemlxmlerror2(self):
# """
# line #: 317
# message: '{}' document does not have a NineML root ('{}')
# """
# self.assertRaises(
# NineMLXMLError,
# Document.from_xml,
# element=Ev2.BadRoot())
#
# def test_from_xml_ninemlruntimeerror(self):
# """
# line #: 340
# message: '{}' element does not correspond to a recognised
# document-level object
# """
# self.assertRaises(
# NineMLUsageError,
# Document.from_xml,
# element=Ev2(Trigger.nineml_type, 'a > b'))
#
# def test_from_xml_ninemlxmlerror3(self):
# """
# line #: 350
# message: Did not find matching NineML class for '{}' element
# """
# self.assertRaises(
# NineMLXMLError,
# Document.from_xml,
# element=Ev2.BadElement())
#
# def test_from_xml_notimplementederror(self):
# """
# line #: 358
# message: Cannot load '{}' element (extensions not implemented)
# """
# unrecogised_E = ElementMaker(namespace='http://unrecognised.net')
# element = Ev2(Document.nineml_type,
# unrecogised_E.UnrecognisedExtension())
# self.assertRaises(
# NotImplementedError,
# Document.from_xml,
# element=element)
#
# def test_from_xml_ninemlxmlerror5(self):
# """
# line #: 369
# message: Missing 'name' (or 'symbol') attribute from document level
# object '{}'
# """
# elem = Ev2(Document.nineml_type,
# Ev2(Dynamics.nineml_type))
# self.assertRaises(
# NineMLXMLError,
# Document.from_xml,
# element=elem)
#
# def test_from_xml_ninemlxmlerror6(self):
# """
# line #: 373
# message: Duplicate identifier '{ob1}:{name}'in NineML file '{url}'
# """
# xml = Ev2(Document.nineml_type,
# Ev2(Dynamics.nineml_type,
# name='A'),
# Ev2(Dynamics.nineml_type,
# name='A'))
# self.assertRaises(
# NineMLXMLError,
# Document.from_xml,
# element=xml)
# def test_url_ninemlruntimeerror(self):
# """
# line #: 464
# message: Cannot reset a documents url to None once it has been
# set('{}') please duplicate the document instead
# """
# doc = Document(
# Dynamics(
# name='A',
# parameters=[
# Parameter('P1', dimension=un.Dimension(name='D', t=1))],
# regime=Regime(name='default'),
# aliases=['A1 := P1 * 2']))
# tmp_dir = tempfile.mkdtemp()
# url = os.path.join(tmp_dir, 'a_url.xml')
# doc._url = url
# with self.assertRaises(NineMLUsageError):
# doc.url = None
# def test_url_ninemlruntimeerror2(self):
# """
# line #: 472
# message: Cannot set url of document to '{}' as there is already a
# document loaded in memory with that url. Please remove all references
# to it first
# (see https://docs.python.org/2/c-api/intro.html#objects-types-and-
# reference-counts)
# """
# a = Document(
# Dynamics(
# name='A',
# parameters=[
# Parameter('P1', dimension=un.Dimension(name='D', t=1))],
# regime=Regime(name='default'),
# aliases=['A1 := P1 * 2']))
# b = Document(
# Dynamics(
# name='A',
# parameters=[
# Parameter('P1', dimension=un.Dimension(name='D', t=1))],
# regime=Regime(name='default'),
# aliases=['A1 := P1 * 2']))
# tmp_dir = tempfile.mkdtemp()
# url = os.path.join(tmp_dir, 'a_url.xml')
# a._url = url
# with self.assertRaises(NineMLUsageError):
# b.url = url
# def test_url_ninemlruntimeerror3(self):
# """
# line #: 488
# message: {} is not a valid URL
# """
# doc = Document(
# Dynamics(
# name='A',
# parameters=[
# Parameter('P1', dimension=un.Dimension(name='D', t=1))],
# regime=Regime(name='default'),
# aliases=['A1 := P1 * 2']))
# with self.assertRaises(NineMLUsageError):
# doc.url = 1
# with self.assertRaises(NineMLUsageError):
# doc.url = '*;l22f23'
# with self.assertRaises(NineMLUsageError):
# doc.url = 'a_file.xml' # Not relative file path
# with self.assertRaises(NineMLUsageError):
# doc.url = '.../a_file.xml' # Not relative file path
| |
#!/usr/bin/python3
import os
import sys
import requests
import subprocess
import json
import click
from termcolor import colored, COLORS
from urllib.parse import urlencode
import webbrowser
from config import *
TWITCH_CLIENT_ID = 'e0fm2z7ufk73k2jnkm21y0gp1h9q2o'
COLORS.update({
'light_grey': 90,
'light_red': 91,
'light_green': 92,
'light_yellow': 93,
'light_blue': 94,
'light_magenta': 95,
'light_cyan': 96,
'light_white': 97
})
@click.group(invoke_without_command=True)
@click.pass_context
@click.option('--config', help='Configuration file location')
def main(ctx, config):
"""List or play Twitch streams"""
if config is not None:
set_config_path(config)
load_config()
if ctx.invoked_subcommand is None:
ctx.invoke(cmd_live)
# The cmd_* functions get called when their respective subcommand is executed
# Example: "python3 twitch-cli live" calls "cmd_live"
@main.command('live')
@click.option('--flat', is_flag=True, help='Don\'t show detailed information or prompt')
@click.option('--game', help='Show live streams for a specific game')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
def cmd_live(flat, game, quality):
"""List live channels"""
list_streams(game=game, flat=flat, playback_quality=quality)
@main.command('vods')
@click.option('--flat', is_flag=True, help='Don\'t show detailed information or prompt')
@click.argument('channel')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
def cmd_vods(channel, flat, quality):
"""List past streams of a channel"""
list_vods(channel, flat, playback_quality=quality)
@main.command('play')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
@click.argument('channel')
def cmd_play(channel, quality):
"""Play a livestream"""
play_stream(channel, quality=quality)
@main.command('follow')
@click.argument('channel')
def cmd_follow(channel):
"""Follow a channel"""
follow_channel(channel)
@main.command('unfollow')
@click.argument('channel')
def cmd_unfollow(channel):
"""Unfollow a channel"""
unfollow_channel(channel)
@main.command('auth')
@click.option('--force', '-f', is_flag=True, help='Overwrite existing OAuth token')
def cmd_auth(force):
"""Authenticate with Twitch"""
config = get_config()
if (config['oauth'] != '') and (not force):
print('You are already authenticated.')
return
token = authenticate()
if token != '':
config['oauth'] = token
save_config()
print('Authentication complete.')
else:
print('Authentication cancelled.')
def get_available_streams(url):
command = 'streamlink -j {}'.format(url)
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
j_out = json.loads(output.decode())
streams = []
for stream in j_out['streams']:
streams.append(stream)
return streams
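# Usage sketch (the quality names come from streamlink's JSON output and vary
# per channel; the values below are typical examples, not guaranteed):
#
# get_available_streams('twitch.tv/somechannel')
# -> ['audio_only', '160p', '480p', '720p60', 'worst', 'best']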
def play_url(url, quality=None):
if quality is None:
quality = ''
command = 'streamlink {} {}'.format(url, quality)
process = subprocess.Popen(command.split(), stdout=None, stderr=None)
output, error = process.communicate()
def play_stream(channel, quality=None):
"""Load a stream and open the player"""
channel_id = get_channel_id(channel)
if channel_id is None:
print('The channel "{}" does not exist'.format(channel))
return
play_url('twitch.tv/{}'.format(channel), quality=quality)
def list_streams(game=None, flat=False, playback_quality=None):
"""Load the list of streams and prompt the user to chose one."""
config = get_config()
if config['oauth'] == '':
print('You have to provide a Twitch OAuth token to list followed '
'streams.')
print('Run "{} auth" to authenticate.'.format(sys.argv[0]))
sys.exit(1)
if game is not None:
streams = get_game_streams(game)
else:
streams = get_followed_streams()
if streams is None:
print('Something went wrong while trying to fetch data from the '
'Twitch API')
sys.exit(1)
elif len(streams) == 0:
print('No streams online now')
return
print_stream_list(streams, title='Streams online now', flat=flat)
if not flat:
selection = input('Stream ID: ')
try:
selection = int(selection)
        except ValueError:
return
else:
return
if not (0 < selection <= len(streams)):
return
play_stream(streams[selection - 1]['channel']['name'], quality=playback_quality)
def get_followed_streams():
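    """Return the live streams followed by the authenticated user (kraken
    v5 API), or None if the response carries no 'streams' key."""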
config = get_config()
url = 'https://api.twitch.tv/kraken/streams/followed'
headers = {
'Accept': 'application/vnd.twitchtv.v5+json',
'Authorization': 'OAuth {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
if 'streams' not in response:
return None
return response['streams']
def get_game_streams(game):
config = get_config()
query = { 'game': game }
url = 'https://api.twitch.tv/kraken/streams/?{}'.format(urlencode(query))
headers = {
'Accept': 'application/vnd.twitchtv.v5+json',
'Authorization': 'OAuth {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
if 'streams' not in response:
return None
return response['streams']
def list_vods(channel, flat, playback_quality=None):
vods = get_channel_vods(channel)
if vods is None:
return
elif len(vods) == 0:
print('No recent VODs for {}'.format(channel))
return
    print_vod_list(vods, title='{}\'s recent VODs'.format(channel), flat=flat)
    if not flat:
        selection = input('VOD ID: ')
        try:
            selection = int(selection)
        except ValueError:
            return
        if 0 < selection <= len(vods):
            play_url(vods[selection - 1]['url'], quality=playback_quality)
def get_channel_vods(channel):
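    """Return a channel's archived broadcasts, or None if the channel is
    unknown or the response carries no 'videos' key."""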
config = get_config()
channel_id = get_channel_id(channel)
if channel_id is None:
print('The channel "{}" does not exist'.format(channel))
return
url = 'https://api.twitch.tv/kraken/channels/{}/videos?broadcast_type=archive'.format(channel_id)
headers = {
'Accept': 'application/vnd.twitchtv.v5+json',
'Authorization': 'OAuth {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
if 'videos' not in response:
return None
return response['videos']
def print_stream_list(streams, title=None, flat=False):
    if title and not flat:
        print(title)
        print('')
    if flat:
        fmt = '{1[channel][name]}'
    else:
        ind_len = len(str(len(streams)))
        bullet = '{0: >' + str(ind_len + 2) + 's}'
        display_name = '{1[channel][display_name]}'
        status = '{1[channel][status]}'
        game = '{1[channel][game]}'
        viewers = '[{1[viewers]} viewers]'
        fmt = (colored(bullet + ' ', 'light_red')
               + colored(display_name + ': ', 'light_blue', attrs=['bold'])
               + colored(game + ' ', 'light_yellow')
               + colored(viewers + '\n', 'light_green')
               + (' ' * (ind_len + 3))
               + colored(status + '\n', 'light_grey'))
    for i, stream in enumerate(streams, start=1):
        print(fmt.format('[' + str(i) + ']', stream))
def print_vod_list(vods, title=None, flat=False):
    if title and not flat:
        print(title)
        print('')
    if flat:
        fmt = '{1[url]}'
    else:
        ind_len = len(str(len(vods)))
        bullet = '{0: >' + str(ind_len + 2) + 's}'
        game = '{1[game]}'
        vod_title = '{1[title]}'
        date = 'Recorded: {1[created_at]}'
        fmt = (colored(bullet + ' ', 'light_red')
               + colored(game + ': ', 'light_blue', attrs=['bold'])
               + colored(vod_title + '\n', 'light_yellow')
               + (' ' * (ind_len + 3))
               + colored(date + '\n', 'light_grey'))
    for i, vod in enumerate(vods, start=1):
        print(fmt.format('[' + str(i) + ']', vod))
def follow_channel(channel):
own_id = get_own_channel_id()
channel_id = get_channel_id(channel)
if channel_id is None:
print('The channel "{}" does not exist'.format(channel))
return
url = 'users/{}/follows/channels/{}'.format(own_id, channel_id)
response = twitchapi_request(url, method='put')
print('You now follow {}'.format(channel))
def unfollow_channel(channel):
own_id = get_own_channel_id()
channel_id = get_channel_id(channel)
if channel_id is None:
print('The channel "{}" does not exist'.format(channel))
return
url = 'users/{}/follows/channels/{}'.format(own_id, channel_id)
response = twitchapi_request(url, method='delete')
print('You don\'t follow {} anymore'.format(channel))
def get_own_channel_id():
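    """Return the user ID bound to the configured OAuth token.

    An empty path requests the kraken API root, which echoes back details
    about the supplied token, including its user_id.
    """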
url = ''
response = twitchapi_request(url)
return response['token']['user_id']
def get_channel_id(name):
query = { 'login': name }
url = 'users?{}'.format(urlencode(query))
response = twitchapi_request(url)
if response['_total'] == 0:
return None
return response['users'][0]['_id']
def authenticate():
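    """Run the OAuth implicit-grant flow (response_type=token) in a browser
    and read the resulting token back from the user; the redirect page is
    expected to display the token so it can be pasted here."""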
query = {
'client_id': TWITCH_CLIENT_ID,
'redirect_uri': 'https://butt4cak3.github.io/twitch-cli/oauth.html',
'response_type': 'token',
'scope': 'user_follows_edit'
}
url = ('https://api.twitch.tv/kraken/oauth2/authorize/?{}'
.format(urlencode(query)))
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
print('Couldn\'t open a browser. Open this URL in your browser to '
'continue:')
print(url)
return
token = input('OAuth token: ')
return token.strip()
def twitchapi_request(url, method='get'):
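    """Perform an authenticated kraken v5 request and return the parsed
    JSON body, or None if the response is not valid JSON."""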
config = get_config()
url = 'https://api.twitch.tv/kraken/' + url
headers = {
'Accept': 'application/vnd.twitchtv.v5+json',
'Client-ID': TWITCH_CLIENT_ID,
'Authorization': 'OAuth {}'.format(config['oauth'])
}
    if method == 'get':
        request = requests.get(url, headers=headers)
    elif method == 'put':
        request = requests.put(url, headers=headers)
    elif method == 'delete':
        request = requests.delete(url, headers=headers)
    else:
        raise ValueError('Unsupported HTTP method: {}'.format(method))
try:
data = request.json()
    except ValueError:
print(request.text)
return None
return data
if __name__ == '__main__':
main()
| |
from unittest import TestCase
import unittest
from .context import PySimpleAutomata
import copy
import itertools
from PySimpleAutomata import NFA
from PySimpleAutomata import AFW
from PySimpleAutomata import automata_IO
class TestAfwWordAcceptance(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_word_acceptance_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_word_acceptance_test_01.json')
self.afw_word_acceptance_test_empty = {
'alphabet': set(),
'states': set(),
'initial_states': set(),
'accepting_states': set(),
'transitions': {}
}
def test_word_acceptance(self):
""" Tests a correct word acceptance """
self.assertTrue(
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01,
['a', 'b', 'b', 'a', 'a', 'b', 'a', 'a']))
def test_word_acceptance_false(self):
""" Tests a non correct word to be refused, with good alphabet """
self.assertFalse(
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01,
['a', 'b', 'a']))
def test_word_acceptance_wrong_alphabet(self):
""" Tests a non correct word, with letters not form the afw alphabet
"""
self.assertFalse(
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01,
['a', 'b', 'wrong']))
def test_word_acceptance_empty_word(self):
""" Tests an empty word"""
self.afw_word_acceptance_test_01['initial_state'] = 'q1'
self.assertFalse(
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01, []))
@unittest.expectedFailure
def test_word_acceptance_wrong_input_1(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_word_acceptance(1, ['a', 'b', 'b', 'a', 'b'])
@unittest.expectedFailure
def test_word_acceptance_wrong_input_2(self):
""" Tests an input different from a list() object. [EXPECTED
FAILURE]"""
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01, 1)
@unittest.expectedFailure
def test_word_acceptance_wrong_dict(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_word_acceptance({'goofy': 'donald'}, ['a', 'b', 'b', 'a', 'b'])
def test_word_acceptance_check_side_effects(self):
""" Tests that the function doesn't make any side effect on the
input"""
before = copy.deepcopy(self.afw_word_acceptance_test_01)
AFW.afw_word_acceptance(self.afw_word_acceptance_test_01,
['a', 'b', 'b', 'a', 'a', 'b', 'a', 'a'])
self.assertDictEqual(before, self.afw_word_acceptance_test_01)
class TestNfaToAfwConversion(TestCase):
def setUp(self):
self.maxDiff = None
self.nfa_nfa_to_afw_test_01 = automata_IO.nfa_dot_importer(
'./tests/dot/afw/nfa_nfa_to_afw_test_01.dot')
self.afw_nfa_to_afw_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nfa_to_afw_test_01.json')
self.nfa_nfa_to_afw_test_empty = {
'alphabet': set(),
'states': set(),
'initial_states': set(),
'accepting_states': set(),
'transitions': {}
}
self.afw_nfa_to_afw_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_nfa_to_afw_conversion(self):
""" Tests a correct nfa to afw conversion """
afw_01 = AFW.nfa_to_afw_conversion(self.nfa_nfa_to_afw_test_01)
self.assertSetEqual(afw_01['alphabet'],
self.afw_nfa_to_afw_test_01['alphabet'])
self.assertSetEqual(afw_01['states'],
self.afw_nfa_to_afw_test_01['states'])
self.assertEqual(afw_01['initial_state'],
self.afw_nfa_to_afw_test_01['initial_state'])
self.assertSetEqual(afw_01['accepting_states'],
self.afw_nfa_to_afw_test_01['accepting_states'])
self.assertEqual(len(afw_01['alphabet']),
len(self.afw_nfa_to_afw_test_01['alphabet']))
        # self.assertDictEqual(afw_01, self.afw_nfa_to_afw_test_01)
        # Because set-to-string serialization has no fixed item order, the
        # result cannot be matched against a predetermined dict without
        # enumerating all possible orderings of the S^2 state combinations:
        # e.g. a transition may read 's2 or s5' or 's5 or s2' unpredictably.
def test_nfa_to_afw_conversion_empty_states(self):
""" Tests converting an empty nfa """
expected_solution = {
'alphabet': set(),
'states': {'root'},
'initial_state': 'root',
'accepting_states': set(),
'transitions': {}
}
self.assertDictEqual(
AFW.nfa_to_afw_conversion(self.nfa_nfa_to_afw_test_empty),
expected_solution)
def test_nfa_to_afw_conversion_empty_transition(self):
""" Tests converting an nfa without transition """
expected_solution = {
'alphabet': self.nfa_nfa_to_afw_test_01['alphabet'],
'states': self.nfa_nfa_to_afw_test_01['states'].union({'root'}),
'initial_state': 'root',
'accepting_states': self.nfa_nfa_to_afw_test_01[
'accepting_states'],
'transitions': {}
}
self.nfa_nfa_to_afw_test_01['transitions'] = {}
self.assertDictEqual(
AFW.nfa_to_afw_conversion(self.nfa_nfa_to_afw_test_01),
expected_solution)
@unittest.expectedFailure
def test_nfa_to_afw_conversion_wrong_input(self):
""" Tests the function using an input different from a dict object.
[EXPECTED FAILURE] """
AFW.nfa_to_afw_conversion(0)
@unittest.expectedFailure
def test_nfa_to_afw_conversion_wrong_dict(self):
""" Tests the function using an input different from a well
        formatted dict representing an afw. [EXPECTED FAILURE] """
AFW.nfa_to_afw_conversion({'goofy': 'donald'})
def test_nfa_to_afw_conversion_side_effects(self):
""" Tests the function doesn't make any side effect on the input """
before = copy.deepcopy(self.nfa_nfa_to_afw_test_01)
AFW.nfa_to_afw_conversion(self.nfa_nfa_to_afw_test_01)
self.assertDictEqual(before, self.nfa_nfa_to_afw_test_01)
class TestAfwToNfaConversion(TestCase):
def setUp(self):
self.maxDiff = None
self.nfa_afw_to_nfa_test_01 = automata_IO.nfa_dot_importer(
'./tests/dot/afw/nfa_afw_to_nfa_test_01.dot')
self.afw_afw_to_nfa_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_afw_to_nfa_test_01.json')
self.afw_nonemptiness_check_test_2 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nonemptiness_check_test_2.json')
self.nfa_empty = {
'alphabet': set(),
'states': set(),
'initial_states': set(),
'accepting_states': set(),
'transitions': {}
}
self.afw_afw_to_nfa_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_to_nfa_conversion_language(self):
""" Test a correct afw conversion to nfa comparing the language read
by the two automaton """
nfa_01 = AFW.afw_to_nfa_conversion(self.afw_afw_to_nfa_test_01)
# automata_IO.nfa_to_dot(nfa_01, 'afw_to_nfa_01')
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
# print(word)
afw_acceptance = AFW.afw_word_acceptance(
self.afw_afw_to_nfa_test_01, word)
nfa_acceptance = NFA.nfa_word_acceptance(nfa_01, word)
self.assertEqual(afw_acceptance, nfa_acceptance)
i += 1
def test_afw_to_nfa_conversion_language_bis_bis(self):
""" Test a correct afw conversion to nfa comparing the language read
by the two automaton """
nfa_01 = AFW.afw_to_nfa_conversion(self.afw_nonemptiness_check_test_2)
# automata_IO.nfa_to_dot(nfa_01, 'afw_to_nfa_strange')
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
# print(word)
afw_acceptance = AFW.afw_word_acceptance(
self.afw_nonemptiness_check_test_2, word)
nfa_acceptance = NFA.nfa_word_acceptance(nfa_01, word)
self.assertEqual(afw_acceptance, nfa_acceptance)
i += 1
def test_afw_to_nfa_conversion_language_bis(self):
""" Test a correct afw conversion to nfa comparing the language read
by the two automaton.
Here we take a nfa, we covert it to afw and back to nfa,
then the original and final nfa are compared trough the language
read.
"""
original_nfa_to_afw = AFW.nfa_to_afw_conversion(
self.nfa_afw_to_nfa_test_01)
nfa_01 = AFW.afw_to_nfa_conversion(original_nfa_to_afw)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
# print(word)
original_nfa_acceptance = NFA.nfa_word_acceptance(
self.nfa_afw_to_nfa_test_01, word)
nfa_acceptance = NFA.nfa_word_acceptance(nfa_01, word)
self.assertEqual(original_nfa_acceptance, nfa_acceptance)
i += 1
def test_afw_to_nfa_conversion_empty_states(self):
""" Tests a AFW to NFA conversion with an empty AFW """
nfa_01 = AFW.afw_to_nfa_conversion(self.afw_afw_to_nfa_test_empty)
self.nfa_empty['initial_states'] = {(None,)}
self.nfa_empty['states'] = self.nfa_empty['initial_states']
self.assertDictEqual(nfa_01, self.nfa_empty)
def test_afw_to_nfa_conversion_empty_transitions(self):
""" Tests a AFW to NFA conversion with a AFW without transitions """
self.afw_afw_to_nfa_test_01['transitions'] = {}
nfa_01 = AFW.afw_to_nfa_conversion(self.afw_afw_to_nfa_test_01)
result = {
'initial_states': {('s',)},
'accepting_states': {('s',)},
'transitions': {},
'states': {('s',)},
'alphabet': {'b', 'a'}
}
self.assertSetEqual(nfa_01['initial_states'], result['initial_states'])
self.assertDictEqual(nfa_01['transitions'], result['transitions'])
self.assertSetEqual(nfa_01['alphabet'], result['alphabet'])
self.assertEqual(len(nfa_01['accepting_states']),
len(result['accepting_states']))
self.assertEqual(len(nfa_01['states']), len(result['states']))
@unittest.expectedFailure
def test_afw_to_nfa_conversion_wrong_input(self):
""" Tests the function using an input different from a dict object.
[EXPECTED FAILURE] """
AFW.afw_to_nfa_conversion(0)
@unittest.expectedFailure
def test_afw_to_nfa_conversion_wrong_dict(self):
""" Tests the function using an input different from a well
        formatted dict representing an afw. [EXPECTED FAILURE] """
AFW.afw_to_nfa_conversion({'goofy': 'donald'})
def test_afw_to_nfa_conversion_side_effects(self):
""" Tests the function doesn't make any side effect on the input """
before = copy.deepcopy(self.afw_afw_to_nfa_test_01)
AFW.afw_to_nfa_conversion(self.afw_afw_to_nfa_test_01)
self.assertDictEqual(before, self.afw_afw_to_nfa_test_01)
class TestAfwCompletion(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_completion_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_completion_test_01.json')
self.afw_completion_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_completion(self):
""" Tests a correct afw completion comparing the language read,
that must be the same"""
original = copy.deepcopy(self.afw_completion_test_01)
AFW.afw_completion(self.afw_completion_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance = AFW.afw_word_acceptance(original, word)
completed_acceptance = AFW.afw_word_acceptance(
self.afw_completion_test_01, word)
self.assertEqual(original_acceptance, completed_acceptance)
i += 1
def test_afw_completion_empty_states(self):
""" Tests a completion of a afw without states"""
AFW.afw_completion(self.afw_completion_test_empty)
result = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
self.assertDictEqual(self.afw_completion_test_empty, result)
def test_afw_completion_empty_transitions(self):
""" Tests a completion of a afw without transitions"""
self.afw_completion_test_01['transitions'] = {}
result = copy.deepcopy(self.afw_completion_test_01)
for state in result['states']:
for action in result['alphabet']:
result['transitions'][state, action] = 'False'
AFW.afw_completion(self.afw_completion_test_01)
self.assertDictEqual(self.afw_completion_test_01, result)
@unittest.expectedFailure
def test_afw_completion_wrong_input(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_completion(0)
@unittest.expectedFailure
def test_afw_completion_wrong_dict(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_completion({'goofy': 'donald'})
def test_afw_completion_side_effects(self):
""" Tests the function makes side effect on the input """
before = copy.deepcopy(self.afw_completion_test_01)
AFW.afw_completion(self.afw_completion_test_01)
self.assertNotEqual(before, self.afw_completion_test_01)
class TestAfwComplementation(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_complementation_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_complementation_test_01.json')
self.afw_complementation_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_complementation(self):
""" Test a correct afw complementation comparing the language read,
that must be discording"""
afw_complemented = AFW.afw_complementation(
self.afw_complementation_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
afw_acceptance = AFW.afw_word_acceptance(
self.afw_complementation_test_01, word)
complement_acceptance = AFW.afw_word_acceptance(
afw_complemented, word)
self.assertNotEqual(afw_acceptance, complement_acceptance)
i += 1
def test_afw_complementation_empty_states(self):
""" Tests a complementation of a afw without states"""
complemented = AFW.afw_complementation(
self.afw_complementation_test_empty)
self.assertEqual(complemented, self.afw_complementation_test_empty)
def test_afw_complementation_empty_transitions(self):
""" Tests a complementation of a afw without transitions"""
self.afw_complementation_test_01['transitions'] = {}
result = copy.deepcopy(self.afw_complementation_test_01)
result['accepting_states'] = {'q1', 'q2'}
for state in result['states']:
for action in result['alphabet']:
result['transitions'][state, action] = 'True'
complemented = AFW.afw_complementation(
self.afw_complementation_test_01)
self.assertEqual(complemented, result)
@unittest.expectedFailure
def test_afw_complementation_wrong_input(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_complementation(0)
@unittest.expectedFailure
def test_afw_complementation_wrong_dict(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_complementation({'goofy': 'donald'})
def test_afw_complementation_side_effects(self):
""" Tests the function doesn't make any side effect on the input """
before = copy.deepcopy(self.afw_complementation_test_01)
AFW.afw_complementation(self.afw_complementation_test_01)
self.assertDictEqual(before, self.afw_complementation_test_01)
class TestAfwUnion(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_union_1_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_union_1_test_01.json')
self.afw_union_2_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_union_2_test_01.json')
self.afw_union_3_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_union_3_test_01.json')
self.afw_union_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_union_disjoint(self):
""" Tests a correct afw union with completely disjoint afws """
AFW.rename_afw_states(self.afw_union_2_test_01, 'a_')
union = AFW.afw_union(self.afw_union_1_test_01,
self.afw_union_2_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_union_2_test_01, word)
union_acceptance = AFW.afw_word_acceptance(union, word)
self.assertEqual(
original_acceptance_1 or original_acceptance_2,
union_acceptance)
i += 1
def test_afw_union_intersecting(self):
""" Tests a correct afw union where the afws have some state in
common """
AFW.rename_afw_states(self.afw_union_3_test_01, 'a_')
union = AFW.afw_union(self.afw_union_1_test_01,
self.afw_union_3_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
# print(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_union_3_test_01, word)
union_acceptance = AFW.afw_word_acceptance(union, word)
self.assertEqual(
original_acceptance_1 or original_acceptance_2,
union_acceptance)
i += 1
def test_afw_union_equals(self):
""" Tests a correct afw union with the same afw """
AFW.rename_afw_states(self.afw_union_1_test_01, 'a_')
union = AFW.afw_union(self.afw_union_1_test_01,
self.afw_union_1_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
union_acceptance = AFW.afw_word_acceptance(union, word)
self.assertEqual(
original_acceptance_1 or original_acceptance_2,
union_acceptance)
i += 1
def test_afw_union_empty_states_1(self):
""" Tests a afw union where the first afw is empty """
union = AFW.afw_union(self.afw_union_test_empty,
self.afw_union_1_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_union_test_empty, word)
union_acceptance = AFW.afw_word_acceptance(union, word)
self.assertEqual(
original_acceptance_1 or original_acceptance_2,
union_acceptance)
i += 1
def test_afw_union_empty_states_2(self):
""" Tests a afw union where the second afw is empty """
union = AFW.afw_union(self.afw_union_1_test_01,
self.afw_union_test_empty)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_union_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_union_test_empty, word)
union_acceptance = AFW.afw_word_acceptance(union, word)
self.assertEqual(
original_acceptance_1 or original_acceptance_2,
union_acceptance)
i += 1
@unittest.expectedFailure
def test_afw_union_wrong_input_1(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_union(0, self.afw_union_1_test_01)
@unittest.expectedFailure
def test_afw_union_wrong_input_2(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_union(self.afw_union_1_test_01, 0)
@unittest.expectedFailure
def test_afw_union_wrong_dict_1(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_union(self.afw_union_1_test_01, {'goofy': 'donald'})
@unittest.expectedFailure
def test_afw_union_wrong_dict_2(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_union({'goofy': 'donald'}, self.afw_union_1_test_01)
def test_afw_union_side_effects_1(self):
""" Tests the function makes side effect on the first input """
before = copy.deepcopy(self.afw_union_1_test_01)
AFW.afw_union(self.afw_union_1_test_01, self.afw_union_2_test_01)
self.assertEqual(before, self.afw_union_1_test_01)
def test_afw_union_side_effects_2(self):
""" Tests the function makes side effect on the second input """
before = copy.deepcopy(self.afw_union_2_test_01)
AFW.afw_union(self.afw_union_1_test_01, self.afw_union_2_test_01)
self.assertEqual(before, self.afw_union_2_test_01)
class TestAfwIntersection(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_intersection_1_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_intersection_1_test_01.json')
self.afw_intersection_2_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_intersection_2_test_01.json')
self.afw_intersection_3_test_01 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_intersection_3_test_01.json')
self.afw_intersection_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_intersection_disjoint(self):
""" Tests a correct afw intersection with completely disjoint afws """
AFW.rename_afw_states(self.afw_intersection_2_test_01, 'a_')
intersection = AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_2_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_intersection_2_test_01, word)
intersection_acceptance = AFW.afw_word_acceptance(intersection,
word)
self.assertEqual(
original_acceptance_1 and original_acceptance_2,
intersection_acceptance)
i += 1
def test_afw_intersection_intersecting(self):
""" Tests a correct afw intersection where the afws have some state
in common """
AFW.rename_afw_states(self.afw_intersection_3_test_01, 'a_')
intersection = AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_3_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
# print(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_intersection_3_test_01, word)
intersection_acceptance = AFW.afw_word_acceptance(intersection,
word)
self.assertEqual(
original_acceptance_1 and original_acceptance_2,
intersection_acceptance)
i += 1
def test_afw_intersection_equals(self):
""" Tests a correct afw intersection with the same afw """
AFW.rename_afw_states(self.afw_intersection_1_test_01, 'a_')
intersection = AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_1_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
intersection_acceptance = AFW.afw_word_acceptance(intersection,
word)
self.assertEqual(
original_acceptance_1 and original_acceptance_2,
intersection_acceptance)
i += 1
def test_afw_intersection_empty_states_1(self):
""" Tests a afw intersection where the first afw is empty """
intersection = AFW.afw_intersection(self.afw_intersection_test_empty,
self.afw_intersection_1_test_01)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_intersection_test_empty, word)
intersection_acceptance = AFW.afw_word_acceptance(intersection,
word)
self.assertEqual(
original_acceptance_1 and original_acceptance_2,
intersection_acceptance)
i += 1
def test_afw_intersection_empty_states_2(self):
""" Tests a afw intersection where the second afw is empty """
intersection = AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_test_empty)
i = 0
last = 7
while i <= last:
base = list(itertools.repeat('a', i))
base += list(itertools.repeat('b', i))
# build all permutation of 'a' and 'b' till length i
word_set = set(itertools.permutations(base, i))
for word in word_set:
word = list(word)
original_acceptance_1 = AFW.afw_word_acceptance(
self.afw_intersection_1_test_01, word)
original_acceptance_2 = AFW.afw_word_acceptance(
self.afw_intersection_test_empty, word)
intersection_acceptance = AFW.afw_word_acceptance(intersection,
word)
self.assertEqual(
original_acceptance_1 and original_acceptance_2,
intersection_acceptance)
i += 1
@unittest.expectedFailure
def test_afw_intersection_wrong_input_1(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_intersection(0, self.afw_intersection_1_test_01)
@unittest.expectedFailure
def test_afw_intersection_wrong_input_2(self):
""" Tests an input different from a dict() object. [EXPECTED
FAILURE]"""
AFW.afw_intersection(self.afw_intersection_1_test_01, 0)
@unittest.expectedFailure
def test_afw_intersection_wrong_dict_1(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_intersection(self.afw_intersection_1_test_01,
{'goofy': 'donald'})
@unittest.expectedFailure
def test_afw_intersection_wrong_dict_2(self):
""" Tests a dict() in input different from a well formatted dict()
        representing an AFW. [EXPECTED FAILURE]"""
AFW.afw_intersection({'goofy': 'donald'},
self.afw_intersection_1_test_01)
def test_afw_intersection_side_effects_1(self):
""" Tests the function makes side effect on the first input """
before = copy.deepcopy(self.afw_intersection_1_test_01)
AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_2_test_01)
self.assertEqual(before, self.afw_intersection_1_test_01)
def test_afw_intersection_side_effects_2(self):
""" Tests the function makes side effect on the second input """
before = copy.deepcopy(self.afw_intersection_2_test_01)
AFW.afw_intersection(self.afw_intersection_1_test_01,
self.afw_intersection_2_test_01)
self.assertEqual(before, self.afw_intersection_2_test_01)
class TestAfwNonemptinessCheck(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_nonemptiness_check_test_1 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nonemptiness_check_test_1.json')
self.afw_nonemptiness_check_test_2 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nonemptiness_check_test_2.json')
self.afw_nonemptiness_check_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_nonemptiness_check(self):
""" Tests a correct afw nonemptiness check """
self.assertTrue(
AFW.afw_nonemptiness_check(self.afw_nonemptiness_check_test_1))
def test_afw_nonemptiness_check_false(self):
""" Tests a correct afw nonemptiness check, where the afw is empty """
self.assertFalse(
AFW.afw_nonemptiness_check(self.afw_nonemptiness_check_test_2))
def test_afw_nonemptiness_check_empty(self):
""" Tests the nonemptiness of an empty afw"""
self.assertFalse(
AFW.afw_nonemptiness_check(self.afw_nonemptiness_check_test_empty))
@unittest.expectedFailure
def test_afw_nonemptiness_check_wrong_dict(self):
""" Tests the nonemptiness of an input dict different from a dict
        representing an afw. [EXPECTED FAILURE] """
self.assertFalse(AFW.afw_nonemptiness_check({}))
@unittest.expectedFailure
def test_afw_nonemptiness_check_wrong_input(self):
""" Tests the nonemptines of an input different from a dict object.
[EXPECTED FAILURE] """
self.assertFalse(AFW.afw_nonemptiness_check(0))
def test_afw_nonemptiness_check_side_effects(self):
""" Tests that the function doesn't make any side effect on the
input"""
before = copy.deepcopy(self.afw_nonemptiness_check_test_1)
AFW.afw_nonemptiness_check(self.afw_nonemptiness_check_test_1)
self.assertDictEqual(before, self.afw_nonemptiness_check_test_1)
class TestAfwNonuniversalityCheck(TestCase):
def setUp(self):
self.maxDiff = None
self.afw_nonuniversality_check_test_1 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nonuniversality_check_test_1.json')
self.afw_nonuniversality_check_test_2 = automata_IO.afw_json_importer(
'./tests/json/afw/afw_nonuniversality_check_test_2.json')
self.afw_nonuniversality_check_test_empty = {
'alphabet': set(),
'states': set(),
'initial_state': None,
'accepting_states': set(),
'transitions': {}
}
def test_afw_nonuniversality_check(self):
""" Tests a correct afw nonuniversality check """
self.assertTrue(AFW.afw_nonuniversality_check(
self.afw_nonuniversality_check_test_1))
def test_afw_nonuniversality_check_false(self):
""" Tests a correct afw nonuniversality check, where the afw is
empty """
self.assertFalse(AFW.afw_nonuniversality_check(
self.afw_nonuniversality_check_test_2))
def test_afw_nonuniversality_check_empty(self):
""" Tests the nonuniversality of an empty afw"""
self.assertFalse(AFW.afw_nonuniversality_check(
self.afw_nonuniversality_check_test_empty))
@unittest.expectedFailure
def test_afw_nonuniversality_check_wrong_dict(self):
""" Tests the nonuniversality of an input dict different from a dict
        representing an afw. [EXPECTED FAILURE] """
self.assertFalse(AFW.afw_nonuniversality_check({}))
@unittest.expectedFailure
def test_afw_nonuniversality_check_wrong_input(self):
""" Tests the nonuniversality of an input different from a dict
object. [EXPECTED FAILURE] """
self.assertFalse(AFW.afw_nonuniversality_check(0))
def test_afw_nonuniversality_check_side_effects(self):
""" Tests that the function doesn't make any side effect on the
input"""
before = copy.deepcopy(self.afw_nonuniversality_check_test_1)
AFW.afw_nonuniversality_check(self.afw_nonuniversality_check_test_1)
self.assertDictEqual(before, self.afw_nonuniversality_check_test_1)
| |
"""
Websocket client for protocol version 13 using the Tornado IO loop.
http://tools.ietf.org/html/rfc6455
Based on the websocket server in tornado/websocket.py by Jacob Kristhammar.
"""
import array
import base64
import functools
import hashlib
import logging
import os
import re
import socket
import struct
import sys
import time
import urlparse
import tornado.escape
from tornado import ioloop, iostream
from tornado.httputil import HTTPHeaders
from tornado.util import bytes_type, b
# The initial handshake over HTTP.
INIT = """\
GET %(path)s HTTP/1.1
Host: %(host)s:%(port)s
Upgrade: websocket
Connection: Upgrade
Sec-Websocket-Key: %(key)s
Sec-Websocket-Version: 13\
"""
# Magic string defined in the spec for calculating keys.
MAGIC = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def frame(data, opcode=0x01):
"""Encode data in a websocket frame."""
# [fin, rsv, rsv, rsv] [opcode]
frame = struct.pack('B', 0x80 | opcode)
    # The high bit of each length byte below is the mask flag; clients
    # always set it since all client-to-server frames are masked.
length = len(data)
if length < 126:
# If length < 126, it fits in the next 7 bits.
frame += struct.pack('B', 0x80 | length)
elif length <= 0xFFFF:
        # If length <= 0xffff, put 126 in the next 7 bits and write the length
# in the next 2 bytes.
frame += struct.pack('!BH', 0x80 | 126, length)
else:
# Otherwise put 127 in the next 7 bits and write the length in the next
# 8 bytes.
frame += struct.pack('!BQ', 0x80 | 127, length)
# Clients must apply a 32-bit mask to all data sent.
mask = map(ord, os.urandom(4))
frame += struct.pack('!BBBB', *mask)
# Mask each byte of data using a byte from the mask.
msg = [ord(c) ^ mask[i % 4] for i, c in enumerate(data)]
frame += struct.pack('!' + 'B' * length, *msg)
return frame
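# Worked example (per RFC 6455; the four mask bytes are random): frame('Hi')
# yields 8 bytes -- 0x81 (FIN + text opcode), 0x82 (mask flag + length 2),
# the 4-byte mask, then the 2 payload bytes each XORed with a mask byte.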
class WebSocket(object):
"""Websocket client for protocol version 13 using the Tornado IO loop."""
def __init__(self, url, io_loop=None, extra_headers=None):
ports = {'ws': 80, 'wss': 443}
self.url = urlparse.urlparse(url)
self.host = self.url.hostname
self.port = self.url.port or ports[self.url.scheme]
self.path = self.url.path or '/'
if extra_headers is not None and len(extra_headers) > 0:
header_set = []
for k, v in extra_headers.iteritems():
header_set.append("%s: %s" % (k, v))
self.headers = "\r\n".join(header_set)
else:
self.headers = None
self.client_terminated = False
self.server_terminated = False
self._final_frame = False
self._frame_opcode = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self.key = base64.b64encode(os.urandom(16))
self.stream = iostream.IOStream(socket.socket(), io_loop)
self.stream.connect((self.host, self.port), self._on_connect)
def on_open(self):
pass
def on_message(self, data):
pass
def on_ping(self):
pass
def on_pong(self):
pass
def on_close(self):
pass
def on_unsupported(self):
pass
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes_type)
self._write_frame(True, opcode, message)
def ping(self):
self._write_frame(True, 0x9, b(''))
def close(self):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
self._write_frame(True, 0x8, b(""))
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
time.time() + 5, self._abort)
def _write_frame(self, fin, opcode, data):
self.stream.write(frame(data, opcode))
def _on_connect(self):
request = '\r\n'.join(INIT.splitlines()) % self.__dict__
if self.headers is not None:
request += '\r\n' + self.headers
request += '\r\n\r\n'
self.stream.write(tornado.escape.utf8(request))
self.stream.read_until('\r\n\r\n', self._on_headers)
def _on_headers(self, data):
first, _, rest = data.partition('\r\n')
headers = HTTPHeaders.parse(rest)
# Expect HTTP 101 response.
if not re.match('HTTP/[^ ]+ 101', first):
self._async_callback(self.on_unsupported)()
self.close()
else:
# Expect Connection: Upgrade.
assert headers['Connection'].lower() == 'upgrade'
# Expect Upgrade: websocket.
assert headers['Upgrade'].lower() == 'websocket'
# Sec-WebSocket-Accept should be derived from our key.
accept = base64.b64encode(hashlib.sha1(self.key + MAGIC).digest())
assert headers['Sec-WebSocket-Accept'] == accept
self._async_callback(self.on_open)()
self._receive_frame()
def _receive_frame(self):
self.stream.read_bytes(2, self._on_frame_start)
def _on_frame_start(self, data):
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & 0x80
reserved_bits = header & 0x70
self._frame_opcode = header & 0xf
self._frame_opcode_is_control = self._frame_opcode & 0x8
        if reserved_bits:
            # server is using as-yet-undefined extensions; abort
            return self._abort()
        if (payloadlen & 0x80):
            # masked frame from the server -> abort connection
            # (RFC 6455: servers must not mask frames sent to clients)
            return self._abort()
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
return self._abort()
if payloadlen < 126:
self._frame_length = payloadlen
self.stream.read_bytes(self._frame_length, self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
def _on_frame_length_16(self, data):
self._frame_length = struct.unpack("!H", data)[0]
self.stream.read_bytes(self._frame_length, self._on_frame_data)
def _on_frame_length_64(self, data):
self._frame_length = struct.unpack("!Q", data)[0]
self.stream.read_bytes(self._frame_length, self._on_frame_data)
def _on_frame_data(self, data):
unmasked = array.array("B", data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += unmasked
if self._final_frame:
opcode = self._fragmented_message_opcode
unmasked = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = unmasked
if self._final_frame:
self._handle_message(opcode, unmasked.tostring())
if not self.client_terminated:
self._receive_frame()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close()
self.close()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if opcode == 0x1:
# UTF-8 data
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._async_callback(self.on_message)(decoded)
elif opcode == 0x2:
# Binary data
self._async_callback(self.on_message)(data)
elif opcode == 0x8:
# Close
self.client_terminated = True
self.close()
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
self._async_callback(self.on_ping)()
elif opcode == 0xA:
# Pong
self._async_callback(self.on_pong)()
else:
self._abort()
def _async_callback(self, callback, *args, **kwargs):
"""Wrap callbacks with this if they are used on asynchronous requests.
Catches exceptions properly and closes this WebSocket if an exception
is uncaught.
"""
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception:
logging.error('Uncaught exception', exc_info=True)
self._abort()
return wrapper
def main(url, message='hello, world'):
class HelloSocket(WebSocket):
def on_open(self):
self.ping()
print '>>', message
self.write_message(message)
def on_message(self, data):
print 'on_message:', data
msg = raw_input('>> ')
if msg == 'ping':
self.ping()
elif msg == 'die':
self.close()
else:
self.write_message(msg)
def on_close(self):
print 'on_close'
def on_pong(self):
print 'on_pong'
ws = HelloSocket(url)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
finally:
ws.close()
if __name__ == '__main__':
main(*sys.argv[1:])
| |
import logging
import os
import sys
from pprint import pprint, pformat
import sloelib
from .sloeyoutube.sloeyoutubeplaylist import SloeYouTubePlaylist
from .sloeyoutube.sloeyoutubesession import SloeYouTubeSession
from .sloeyoutube.sloeyoutubetree import SloeYouTubeTree
from .sloeyoutube.sloeyoutubeupload import SloeYouTubeUpload
class SloePluginYoutube(sloelib.SloeBasePlugIn):
def __init__(self, *params):
sloelib.SloeBasePlugIn.__init__(self, *params)
sloelib.SloePlaylist.OPTIONAL_ELEMENTS.update({
"youtube_description": "Description expression for playlist on YouTube",
"youtube_privacy": "YouTube privacy setting for playlist",
"youtube_tags": "YouTube tag expression for playlist",
"youtube_title": "Title expression for YouTube playlist"
})
sloelib.SloeTransferSpec.OPTIONAL_ELEMENTS.update({
"youtube_category": "YouTube category used for items",
"youtube_description": "Description expression for items on YouTube",
"youtube_privacy": "YouTube privacy setting for items",
"youtube_tags": "YouTube tag expression for items",
"youtube_title": "Title expression for YouTube items"
})
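        # Registering these as optional elements lets playlist and
        # transfer-spec INI files carry YouTube-specific settings, which
        # are read back below via getattr(..., 'youtube_' + element).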
def command_youtubeauth(self, params, options):
session_r = SloeYouTubeSession("r")
session_r()
session_w = SloeYouTubeSession("w")
session_w()
session_upload = SloeYouTubeSession("upload")
session_upload()
def command_youtubedumptree(self, params, options):
session_r = SloeYouTubeSession("r")
tree = SloeYouTubeTree(session_r)
tree.read()
print tree
def _get_youtube_spec_for_item(self, item, remoteitem, transferspec):
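        """Build the upload metadata dict for one item by expanding the
        transfer spec's youtube_* expressions, and tag it with the remote
        item's uuid so the upload can be traced back later."""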
youtube_spec = {
"filepath": item.get_file_path(),
}
elements = (
"category",
"description",
"privacy",
"tags",
"title"
)
for element in elements:
youtube_spec[element] = sloelib.SloeOutputUtil.substitute_for_remote_item(
getattr(transferspec, 'youtube_' + element), item, remoteitem, transferspec)
        tags = youtube_spec["tags"].split(",")
tags.append("oarstackremoteitem=%s" % remoteitem.uuid)
youtube_spec["tags"] = ",".join([x.strip() for x in tags])
return youtube_spec
def do_item_transfer_job(self, item, transferspec):
try:
remoteitem = sloelib.SloeOutputUtil.find_remoteitem(item, transferspec)
youtube_spec = self._get_youtube_spec_for_item(item, remoteitem, transferspec)
logging.debug("youtube_spec=%s" % pformat(youtube_spec))
youtube_session = SloeYouTubeSession("upload")
remote_id = SloeYouTubeUpload.do_upload(youtube_session, youtube_spec)
remoteitem.update({
"description": youtube_spec["description"],
"remote_id": remote_id,
"remote_url": "http://youtu.be/%s" % remote_id,
"title": youtube_spec["title"]
})
remoteitem.verify_creation_data()
sloelib.SloeOutputUtil.create_remoteitem_ini(item, remoteitem)
logging.debug("|YouTubeSpec|=%s" % pformat(youtube_spec))
except sloelib.SloeError, e:
logging.error("Abandoned transfer attempt: %s" % str(e))
def do_update_items_job(self, remoteitem_uuids):
session_w = SloeYouTubeSession("w")
for remoteitem_uuid in remoteitem_uuids:
try:
remoteitem = sloelib.SloeTreeNode.get_object_by_uuid(remoteitem_uuid)
self._update_remote_item(session_w, remoteitem)
except sloelib.SloeError, e:
logging.error("Abandoned transfer attempt: %s" % str(e))
def _update_remote_item(self, session_w, remoteitem):
ids = sloelib.SloeUtil.extract_common_id(remoteitem.common_id)
item = sloelib.SloeTreeNode.get_object_by_uuid(ids["I"])
transferspec = sloelib.SloeTreeNode.get_object_by_uuid(ids["T"])
youtube_spec = self._get_youtube_spec_for_item(item, remoteitem, transferspec)
logging.debug("youtube_spec=%s" % pformat(youtube_spec))
remote_id = SloeYouTubeUpload.do_item_update(session_w, remoteitem.remote_id, youtube_spec)
def do_playlist_transfer_job(self, playlist):
try:
ordered_items = playlist.get_ordered_items()
if len(ordered_items) == 0:
raise sloelib.SloeError("Playlist %s is empty" % playlist.name)
remoteplaylist = sloelib.SloeOutputUtil.find_remoteplaylist(playlist)
youtube_session = SloeYouTubeSession("w")
if remoteplaylist.get("remote_id", None) is not None:
youtube_playlistid = remoteplaylist.remote_id
remote_playlistitems = self._read_existing_playlist(youtube_session, youtube_playlistid)
else:
remote_playlistitems = []
youtube_spec = {}
elements = (
"description",
"privacy",
"tags",
"title"
)
for element in elements:
youtube_spec[element] = sloelib.SloeOutputUtil.substitute_for_remote_playlist(
getattr(playlist, 'youtube_' + element), playlist, remoteplaylist)
tags = youtube_spec["tags"].split(",")
tags.append("oarstackremoteitem=%s" % remoteplaylist.uuid)
youtube_spec["tags"] = ",".join([x.strip() for x in tags])
logging.info("youtube_spec=%s" % pformat(youtube_spec))
youtube_playlist = SloeYouTubePlaylist.do_insert_playlist(youtube_session, youtube_spec)
first_video_youtube_id = ordered_items[0].remote_id
remoteplaylist.update({
"description": youtube_spec["description"],
"remote_id": youtube_playlist["id"],
"remote_url": "http://www.youtube.com/watch?v=%s&list=%s" % (first_video_youtube_id, youtube_playlist["id"]),
"title": youtube_spec["title"]
})
remoteplaylist.verify_creation_data()
sloelib.SloeOutputUtil.create_remoteplaylist_ini(playlist, remoteplaylist)
youtube_playlistid = youtube_playlist["id"]
#SloeYouTubePlaylist.do_playlist_wipe(youtube_session, youtube_playlistid)
#remote_playlistitems = []
for i in xrange(max(len(ordered_items), len(remote_playlistitems))):
prefix = "At position %d: " % (i+1)
if i < len(ordered_items) and i < len(remote_playlistitems):
playlistitem_id, videoId, position = remote_playlistitems[i]
if videoId == ordered_items[i].remote_id and position == i:
logging.info("%sRemote item OK: %s" % (prefix, ordered_items[i].title))
else:
# Item is both local and remote, so operation is update
logging.info("%sUpdating remote id %s item '%s'" % (prefix, playlistitem_id, ordered_items[i].name))
SloeYouTubePlaylist.do_update_playlistitem(youtube_session, youtube_playlistid, playlistitem_id, ordered_items[i].remote_id, i)
elif i < len(ordered_items):
# Item is local but not remote, so operation is insert
logging.info("%sInserting item '%s'" % (prefix, ordered_items[i].name))
SloeYouTubePlaylist.do_insert_playlistitem(youtube_session, youtube_playlistid, ordered_items[i].remote_id, i)
elif i < len(remote_playlistitems):
# Item is remote but not local, so operation is delete
playlistitem_id, videoId, position = remote_playlistitems[i]
logging.info("%sDeleting item remote id %s" % (prefix, playlistitem_id))
SloeYouTubePlaylist.do_delete_playlistitem(youtube_session, playlistitem_id)
else:
                    raise sloelib.SloeError("Logical error")
except sloelib.SloeError, e:
logging.error("Abandoned transfer attempt: %s" % str(e))
def _read_existing_playlist(self, youtube_session, playlist_youtube_id):
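        """Return the remote playlist's item ids; if they cannot be read,
        wipe the playlist and treat it as empty."""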
playlistitem_ids = SloeYouTubePlaylist.do_read_playlist_item_ids(youtube_session, playlist_youtube_id)
if playlistitem_ids is None:
SloeYouTubePlaylist.do_playlist_wipe(youtube_session, playlist_youtube_id)
playlistitem_ids = []
return playlistitem_ids
SloePluginYoutube("youtube")
| |
"""
Internal shared-state variables such as config settings and host lists.
"""
import os
import sys
from optparse import make_option
from fabric.network import HostConnectionCache, ssh
from fabric.version import get_version
from fabric.utils import _AliasDict, _AttributeDict
#
# Win32 flag
#
# Impacts a handful of platform specific behaviors. Note that Cygwin's Python
# is actually close enough to "real" UNIXes that it doesn't need (or want!) to
# use PyWin32 -- so we only test for literal Win32 setups (vanilla Python,
# ActiveState etc) here.
win32 = (sys.platform == 'win32')
#
# Environment dictionary - support structures
#
# By default, if the user (including code using Fabric as a library) doesn't
# set the username, we obtain the currently running username and use that.
def _get_system_username():
"""
Obtain name of current system user, which will be default connection user.
"""
import getpass
username = None
try:
username = getpass.getuser()
# getpass.getuser supported on both Unix and Windows systems.
    # getpass.getuser may call pwd.getpwuid which in turn may raise KeyError
    # if it cannot find a username for the given UID, e.g. on ep.io
    # and similar "non VPS" style services. Rather than error out, just keep
    # the default username as None; callers can check for it later if needed.
except KeyError:
pass
except ImportError:
if win32:
import win32api
import win32security
import win32profile
username = win32api.GetUserName()
return username
def _rc_path():
"""
Return platform-specific default file path for $HOME/.fabricrc.
"""
rc_file = '.fabricrc'
rc_path = '~/' + rc_file
expanded_rc_path = os.path.expanduser(rc_path)
if expanded_rc_path == rc_path and win32:
from win32com.shell.shell import SHGetSpecialFolderPath
from win32com.shell.shellcon import CSIDL_PROFILE
expanded_rc_path = "%s/%s" % (
SHGetSpecialFolderPath(0, CSIDL_PROFILE),
rc_file
)
return expanded_rc_path
default_port = '22'  # kept as a string: host strings are parsed and joined as text
default_ssh_config_path = os.path.join(os.path.expanduser('~'), '.ssh', 'config')
# Options/settings which exist both as environment keys and which can be set on
# the command line, are defined here. When used via `fab` they will be added to
# the optparse parser, and either way they are added to `env` below (i.e. the
# 'dest' value becomes the environment key and the value, the env value).
#
# Keep in mind that optparse changes hyphens to underscores when automatically
# deriving the `dest` name, e.g. `--reject-unknown-hosts` becomes
# `reject_unknown_hosts`.
#
# Furthermore, *always* specify some sort of default to avoid ending up with
# optparse.NO_DEFAULT (currently a two-tuple)! In general, None is a better
# default than ''.
#
# User-facing documentation for these is kept in sites/docs/env.rst. (A worked
# example of the dest-to-env mapping follows the option list below.)
env_options = [
make_option('-a', '--no_agent',
action='store_true',
default=False,
help="don't use the running SSH agent"
),
make_option('-A', '--forward-agent',
action='store_true',
default=False,
help="forward local agent to remote end"
),
make_option('--abort-on-prompts',
action='store_true',
default=False,
help="abort instead of prompting (for password, host, etc)"
),
make_option('--banner-timeout',
type='int',
default=None,
metavar="N",
help="set ssh banner timeout to N seconds"
),
make_option('-c', '--config',
dest='rcfile',
default=_rc_path(),
metavar='PATH',
help="specify location of config file to use"
),
make_option('--colorize-errors',
action='store_true',
default=False,
help="Color error output",
),
make_option('-D', '--disable-known-hosts',
action='store_true',
default=False,
help="do not load user known_hosts file"
),
make_option('-e', '--eagerly-disconnect',
action='store_true',
default=False,
help="disconnect from hosts as soon as possible"
),
make_option('-f', '--fabfile',
default='fabfile',
metavar='PATH',
help="python module file to import, e.g. '../other.py'"
),
make_option('-g', '--gateway',
default=None,
metavar='HOST',
help="gateway host to connect through"
),
make_option('--hide',
metavar='LEVELS',
help="comma-separated list of output levels to hide"
),
make_option('-H', '--hosts',
default=[],
help="comma-separated list of hosts to operate on"
),
make_option('-i',
action='append',
dest='key_filename',
metavar='PATH',
default=None,
help="path to SSH private key file. May be repeated."
),
make_option('-k', '--no-keys',
action='store_true',
default=False,
help="don't load private key files from ~/.ssh/"
),
make_option('--keepalive',
dest='keepalive',
type=int,
default=0,
metavar="N",
help="enables a keepalive every N seconds"
),
make_option('--linewise',
action='store_true',
default=False,
help="print line-by-line instead of byte-by-byte"
),
make_option('-n', '--connection-attempts',
type='int',
metavar='M',
dest='connection_attempts',
default=1,
help="make M attempts to connect before giving up"
),
make_option('--no-pty',
dest='always_use_pty',
action='store_false',
default=True,
help="do not use pseudo-terminal in run/sudo"
),
make_option('-p', '--password',
default=None,
help="password for use with authentication and/or sudo"
),
make_option('-P', '--parallel',
dest='parallel',
action='store_true',
default=False,
help="default to parallel execution method"
),
make_option('--port',
default=default_port,
help="SSH connection port"
),
make_option('-r', '--reject-unknown-hosts',
action='store_true',
default=False,
help="reject unknown hosts"
),
make_option('--system-known-hosts',
default=None,
help="load system known_hosts file before reading user known_hosts"
),
make_option('-R', '--roles',
default=[],
help="comma-separated list of roles to operate on"
),
make_option('-s', '--shell',
default='/bin/bash -l -c',
help="specify a new shell, defaults to '/bin/bash -l -c'"
),
make_option('--show',
metavar='LEVELS',
help="comma-separated list of output levels to show"
),
make_option('--skip-bad-hosts',
action="store_true",
default=False,
help="skip over hosts that can't be reached"
),
make_option('--skip-unknown-tasks',
action="store_true",
default=False,
help="skip over unknown tasks"
),
make_option('--ssh-config-path',
default=default_ssh_config_path,
metavar='PATH',
help="Path to SSH config file"
),
make_option('-t', '--timeout',
type='int',
default=10,
metavar="N",
help="set connection timeout to N seconds"
),
make_option('-T', '--command-timeout',
dest='command_timeout',
type='int',
default=None,
metavar="N",
help="set remote command timeout to N seconds"
),
make_option('-u', '--user',
default=_get_system_username(),
help="username to use when connecting to remote hosts"
),
make_option('-w', '--warn-only',
action='store_true',
default=False,
help="warn, instead of abort, when commands fail"
),
make_option('-x', '--exclude-hosts',
default=[],
metavar='HOSTS',
help="comma-separated list of hosts to exclude"
),
make_option('-z', '--pool-size',
dest='pool_size',
type='int',
metavar='INT',
default=0,
help="number of concurrent processes to use in parallel mode",
),
]
#
# Environment dictionary - actual dictionary object
#
# Global environment dict. Currently a catchall for everything: config settings
# such as global deep/broad mode, host lists, username etc.
# Most default values are specified in `env_options` above, in the interests of
# preserving DRY: anything in here is generally not settable via the command
# line.
env = _AttributeDict({
'abort_exception': None,
'again_prompt': 'Sorry, try again.',
'all_hosts': [],
'combine_stderr': True,
'colorize_errors': False,
'command': None,
'command_prefixes': [],
'cwd': '', # Must be empty string, not None, for concatenation purposes
'dedupe_hosts': True,
'default_port': default_port,
'eagerly_disconnect': False,
'echo_stdin': True,
'effective_roles': [],
'exclude_hosts': [],
'gateway': None,
'host': None,
'host_string': None,
'lcwd': '', # Must be empty string, not None, for concatenation purposes
'local_user': _get_system_username(),
'output_prefix': True,
'passwords': {},
'path': '',
'path_behavior': 'append',
'port': default_port,
'real_fabfile': None,
'remote_interrupt': None,
'roles': [],
'roledefs': {},
'shell_env': {},
'skip_bad_hosts': False,
'skip_unknown_tasks': False,
'ssh_config_path': default_ssh_config_path,
'ok_ret_codes': [0], # a list of return codes that indicate success
# -S so sudo accepts passwd via stdin, -p with our known-value prompt for
# later detection (thus %s -- gets filled with env.sudo_prompt at runtime)
'sudo_prefix': "sudo -S -p '%(sudo_prompt)s' ",
'sudo_prompt': 'sudo password:',
'sudo_user': None,
'tasks': [],
'prompts': {},
'use_exceptions_for': {'network': False},
'use_shell': True,
'use_ssh_config': False,
'user': None,
'version': get_version('short')
})
# Fill in exceptions settings
exceptions = ['network']
exception_dict = {}
for e in exceptions:
exception_dict[e] = False
env.use_exceptions_for = _AliasDict(exception_dict,
aliases={'everything': exceptions})
# Add in option defaults
for option in env_options:
env[option.dest] = option.default
#
# Command dictionary
#
# Keys are the command/function names, values are the callables themselves.
# This is filled in when main() runs.
commands = {}
#
# Host connection dict/cache
#
connections = HostConnectionCache()
def _open_session():
return connections[env.host_string].get_transport().open_session()
def default_channel():
"""
Return a channel object based on ``env.host_string``.
"""
try:
chan = _open_session()
except ssh.SSHException as err:
if str(err) == 'SSH session not active':
connections[env.host_string].close()
del connections[env.host_string]
chan = _open_session()
else:
raise
chan.settimeout(0.1)
chan.input_enabled = True
return chan
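# Illustrative usage (a sketch, not Fabric API documentation): once
# env.host_string names a live, cached connection, callers such as run()/sudo()
# obtain a channel this way:
#
#   chan = default_channel()
#   chan.exec_command('uname -a')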
#
# Output controls
#
# Keys are "levels" or "groups" of output, values are always boolean,
# determining whether output falling into the given group is printed or not
# printed.
#
# By default, everything except 'debug' is printed, as this is what the average
# user, and new users, are most likely to expect.
#
# See docs/usage.rst for details on what these levels mean.
output = _AliasDict({
'status': True,
'aborts': True,
'warnings': True,
'running': True,
'stdout': True,
'stderr': True,
'exceptions': False,
'debug': False,
'user': True
}, aliases={
'everything': ['warnings', 'running', 'user', 'output', 'exceptions'],
'output': ['stdout', 'stderr'],
'commands': ['stdout', 'running']
})
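# Illustrative sketch of the alias behaviour assumed above: assigning to an
# alias key fans out to all of its member keys, e.g.
#
#   output['everything'] = False  # hides warnings, running, user, stdout,
#                                 # stderr and exceptions in one assignment
#   output['output'] = True       # re-enables just stdout + stderr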
| |
# -*- coding: utf-8 -*-
"""
Contains the logic for requesting pieces, as well as that for writing them to disk.
"""
__all__ = ['PieceRequester']
import asyncio
import dataclasses
import logging
from collections import defaultdict
from typing import Optional
import bitstring
from .errors import NonSequentialBlockError
from .messages import Request, Block, Piece
from .metainfo import MetaInfoFile
from .peer_info import PeerInfo
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class WriteBuffer:
buffer: bytes = b''
offset: int = 0
class PieceRequester:
"""
Responsible for requesting pieces from peers.
A single requester is shared between all peers to which
the local peer is connected.
We currently use a naive sequential requesting strategy.
"""
_block_size = 2 ** 14
def __init__(self, torrent: MetaInfoFile, stats):
self.torrent = torrent
# dictionary of peers and indices of the pieces the peer has available
self.peer_piece_map: dict[PeerInfo, set[int]] = defaultdict(set)
# canonical list of unfulfilled requests
self._unfulfilled_requests: list[Request] = []
self._peer_unfulfilled_requests: dict[PeerInfo, set[Request]] = defaultdict(set)
self._stats = stats
def _build_requests(self) -> list[Request]:
"""
Builds the list of unfulfilled requests.
When we need to fill a queue with requests, we just make copies of
requests in our list and mark the ones we have with the peer we send
the request to.
:return: a list of all the requests needed to download the torrent
"""
requests = []
for piece in self.torrent.pieces:
if not piece.complete:
for block in piece.blocks:
requests.append(Request.from_block(block))
return requests
def add_available_piece(self, peer: PeerInfo, index: int):
"""
Called when a peer advertises it has a piece available.
:param peer: The peer that has the piece
:param index: The index of the piece
"""
if not self._unfulfilled_requests:
self._unfulfilled_requests = self._build_requests()
self.peer_piece_map[peer].add(index)
def add_peer_bitfield(self, peer: PeerInfo, bitfield: bitstring.BitArray):
"""
Updates our dictionary of pieces with data from the remote peer
:param peer: The peer who sent this bitfield, kept around
to know where to eventually send requests
:param bitfield: The bitfield sent by the peer
"""
if not self._unfulfilled_requests:
self._unfulfilled_requests = self._build_requests()
for i, b in enumerate(bitfield):
if b:
self.add_available_piece(peer, i)
def peer_is_interesting(self, peer: PeerInfo) -> bool:
"""
Returns whether or not the peer is interesting to us.
We currently follow the naive sequential strategy: a peer is interesting
if it has at least one piece that we still need.
:param peer: The peer we're curious about.
:return: True if the peer is interesting, False otherwise
"""
if peer not in self.peer_piece_map:
return False
needed = {i for i, piece in enumerate(self.torrent.pieces)
if not piece.complete}
peer_has = {i for i in self.peer_piece_map[peer] if i in needed}
return len(peer_has) > 0
def remove_requests_for_peer(self, peer: PeerInfo):
"""
Removes all pending requests for a peer.
Called when the peer disconnects or chokes us.
:param peer: peer whose pending requests we should remove
"""
for request in self._peer_unfulfilled_requests[peer]:
request.peer_id = ""
del self._peer_unfulfilled_requests[peer]
def peer_outstanding_requests(self, peer: PeerInfo):
"""
The set of unfulfilled `Request`s we've sent the peer.
:param peer: The peer to retrieve unfulfilled requests for
:return: The set() of unfulfilled `Request`s we've sent the peer.
"""
return self._peer_unfulfilled_requests[peer]
def remove_requests_for_block(self, peer: PeerInfo, block: Block) -> bool:
"""
Removes all pending requests for the given block.
:param peer: `PeerInfo` of the peer who sent the block.
:param block: `Block` to remove from pending requests.
:return: True if removed, False otherwise
"""
if not self._peer_unfulfilled_requests[peer]:
return False
request = Request.from_block(block)
if request in self._peer_unfulfilled_requests[peer]:
self._peer_unfulfilled_requests[peer].discard(request)
# The canonical list may already have dropped this request (e.g. when
# the piece completed via another peer), so tolerate its absence.
try:
self._unfulfilled_requests.remove(request)
except ValueError:
pass
return True
return False
def remove_requests_for_piece(self, piece_index: int):
"""
Removes all pending requests with the given piece index.
Called when a completed piece has been received.
:param piece_index: piece index whose requests should be removed
"""
to_discard = []
for request in self._unfulfilled_requests:
if request.index == piece_index:
to_discard.append(request)
for request_set in self._peer_unfulfilled_requests.values():
request_set.discard(request)
for r in to_discard:
self._unfulfilled_requests.remove(r)
def remove_peer(self, peer: PeerInfo):
"""
Removes a peer from this requester's data structures in the case
that our communication with that peer has stopped
:param peer: peer to remove
"""
if peer in self.peer_piece_map:
del self.peer_piece_map[peer]
self.remove_requests_for_peer(peer)
def fill_peer_request_queue(self, peer: PeerInfo, msg_queue: asyncio.Queue) -> bool:
"""
Fills the given queue with up to 10 new requests for the peer.
:param peer: The peer asking for a top up
:param msg_queue: the message queue to place the requests into
:return: True if at least one new request was added, False otherwise
"""
added_more = False
num_needed = 10 - len(self._peer_unfulfilled_requests[peer])
for _ in range(num_needed):
request = self.next_request_for_peer(peer)
if not request: # no more requests for this peer
break
asyncio.create_task(msg_queue.put(request))
added_more = True
return added_more
def next_request_for_peer(self, peer: PeerInfo) -> Optional[Request]:
"""
Finds the next request for the peer.
Searches over each unfulfilled request (currently in order), skipping
those that have been requested from other peers or the peer doesn't have
available until finding a request. The peer is marked as being the requester
and a copy of the request is returned.
:param peer: The peer to retrieve the next request for.
:return: The next `Request` to send, or None if not available.
"""
if peer not in self.peer_piece_map:
return
if len(self.peer_piece_map[peer]) == 0:
return
found_request = None
for request in self._unfulfilled_requests:
if request.peer_id:
continue
if request.index not in self.peer_piece_map[peer]:
continue
request.peer_id = peer.peer_id
self._peer_unfulfilled_requests[peer].add(request)
found_request = request
break
return found_request
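# Illustrative flow (requester, peer_a and peer_b are hypothetical objects):
# once a request is handed to one peer it carries that peer's id, so another
# peer asking next skips it and receives a different block.
#
#   r1 = requester.next_request_for_peer(peer_a)  # r1.peer_id == peer_a.peer_id
#   r2 = requester.next_request_for_peer(peer_b)  # r2 is not r1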
def peer_received_block(self, block: Block, peer: PeerInfo) -> Optional[Piece]:
"""
Called when we've received a block from the remote peer.
The block is validated (its piece exists, is incomplete, and was actually
requested) and then added to that piece; anything else is counted as
wasted bytes and discarded.
:param block: The block message carrying the payload data
:param peer: The peer who sent the block
:return: The Piece if the block completes it.
"""
assert peer and block and block.data
block_size = len(block.data)
if block.index >= len(self.torrent.pieces):
logger.debug("Disregarding. Piece %s does not exist." % block.index)
self._stats.torrent_bytes_wasted += block_size
return
piece = self.torrent.pieces[block.index]
if piece.complete:
logger.debug("Disregarding. I already have %s" % block)
self._stats.torrent_bytes_wasted += block_size
return
# Remove the pending request for this block; if none existed, we never asked for it
if not self.remove_requests_for_block(peer, block):
logger.debug("Disregarding. I did not request %s" % block)
self._stats.torrent_bytes_wasted += block_size
return
try:
piece.add_block(block)
except NonSequentialBlockError:
# TODO: Handle non-sequential blocks?
logger.error("Block begin index is non-sequential for: %s" % block)
self._stats.torrent_bytes_wasted += block_size
return
# Count the block before checking completion so the final block of a
# piece is included in the downloaded total.
self._stats.torrent_bytes_downloaded += block_size
if piece.complete:
return self._piece_complete(block)
return
def _piece_complete(self, block) -> Optional[Piece]:
"""
Called when the last block of a piece has been received.
Validates the piece hash matches, writes the data, and marks the
piece complete.
:param block: the block that completes the piece.
:return: Piece if it was completed.
"""
piece_index = block.index
piece = self.torrent.pieces[piece_index]
if not piece.complete:
self._stats.torrent_bytes_wasted += len(block.data)
return
h = piece.hash()
if h != self.torrent.piece_hashes[piece.index]:
logger.error(
"Hash for received piece %s doesn't match. Received: %s\tExpected: %s" %
(piece.index, h, self.torrent.piece_hashes[piece.index]))
piece.reset()
self._stats.torrent_bytes_wasted += piece.length
else:
logger.info("Completed piece received: %s" % piece)
self.remove_requests_for_piece(piece.index)
return piece
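# Minimal usage sketch (illustrative; `torrent`, `stats`, `peer`, `bitfield`
# and `peer_msg_queue` are assumed to be constructed elsewhere in the client):
#
#   requester = PieceRequester(torrent, stats)
#   requester.add_peer_bitfield(peer, bitfield)
#   if requester.peer_is_interesting(peer):
#       requester.fill_peer_request_queue(peer, peer_msg_queue)
#   # ...later, for each block received from the wire:
#   completed_piece = requester.peer_received_block(block, peer)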
| |
import os
import shutil
import time
from typing import Any, Callable, Dict, List
from unittest import mock
import ujson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import (
check_send_message,
do_change_user_role,
do_set_realm_property,
log_event,
)
from zerver.lib.events import fetch_initial_state_data, get_raw_user_data
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import POSTRequestMock, queries_captured, stub_event_queue_user_events
from zerver.lib.users import get_api_key
from zerver.models import (
Realm,
UserMessage,
UserProfile,
flush_per_request_caches,
get_client,
get_realm,
get_stream,
get_system_bot,
)
from zerver.tornado.event_queue import (
allocate_client_descriptor,
clear_client_event_queues_for_testing,
get_client_info_for_message_event,
process_message_event,
)
from zerver.tornado.views import get_events
from zerver.views.events_register import _default_all_public_streams, _default_narrow
class LogEventsTest(ZulipTestCase):
def test_with_missing_event_log_dir_setting(self) -> None:
with self.settings(EVENT_LOG_DIR=None):
log_event(dict())
def test_log_event_mkdir(self) -> None:
dir_name = os.path.join(settings.TEST_WORKER_DIR, "test-log-dir")
try:
shutil.rmtree(dir_name)
except OSError: # nocoverage
# assume it doesn't exist already
pass
self.assertFalse(os.path.exists(dir_name))
with self.settings(EVENT_LOG_DIR=dir_name):
event: Dict[str, int] = {}
log_event(event)
self.assertTrue(os.path.exists(dir_name))
class EventsEndpointTest(ZulipTestCase):
def test_events_register_endpoint(self) -> None:
# This test is intended to get minimal coverage on the
# events_register code paths
user = self.example_user("hamlet")
with mock.patch('zerver.views.events_register.do_events_register', return_value={}):
result = self.api_post(user, '/json/register')
self.assert_json_success(result)
with mock.patch('zerver.lib.events.request_event_queue', return_value=None):
result = self.api_post(user, '/json/register')
self.assert_json_error(result, "Could not allocate event queue")
return_event_queue = '15:11'
return_user_events: List[Dict[str, Any]] = []
# We choose realm_emoji somewhat randomly--we want
# a "boring" event type for the purpose of this test.
event_type = 'realm_emoji'
test_event = dict(
id=6,
type=event_type,
realm_emoji=[]
)
# Test that call is made to deal with a returning soft deactivated user.
with mock.patch('zerver.lib.events.reactivate_user_if_soft_deactivated') as fa:
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(user, '/json/register', dict(event_types=ujson.dumps([event_type])))
self.assertEqual(fa.call_count, 1)
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(user, '/json/register', dict(event_types=ujson.dumps([event_type])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], -1)
self.assertEqual(result_dict['queue_id'], '15:11')
# Now start simulating returning actual data
return_event_queue = '15:12'
return_user_events = [test_event]
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(user, '/json/register', dict(event_types=ujson.dumps([event_type])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
self.assertEqual(result_dict['queue_id'], '15:12')
# sanity check the data relevant to our event
self.assertEqual(result_dict['realm_emoji'], [])
# Now test with `fetch_event_types` not matching the event
return_event_queue = '15:13'
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(user, '/json/register',
dict(event_types=ujson.dumps([event_type]),
fetch_event_types=ujson.dumps(['message'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
# Check that the message event types data is in there
self.assertIn('max_message_id', result_dict)
# Check that our original event type is not there.
self.assertNotIn(event_type, result_dict)
self.assertEqual(result_dict['queue_id'], '15:13')
# Now test with `fetch_event_types` matching the event
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(user, '/json/register',
dict(fetch_event_types=ujson.dumps([event_type]),
event_types=ujson.dumps(['message'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
# Check that we didn't fetch the messages data
self.assertNotIn('max_message_id', result_dict)
# Check that the realm_emoji data is in there.
self.assertIn('realm_emoji', result_dict)
self.assertEqual(result_dict['realm_emoji'], [])
self.assertEqual(result_dict['queue_id'], '15:13')
def test_tornado_endpoint(self) -> None:
# This test is mostly intended to get minimal coverage on
# the /notify_tornado endpoint, so we can have 100% URL coverage,
# but it does exercise a little bit of the codepath.
post_data = dict(
data=ujson.dumps(
dict(
event=dict(
type='other',
),
users=[self.example_user('hamlet').id],
),
),
)
req = POSTRequestMock(post_data, user_profile=None)
req.META['REMOTE_ADDR'] = '127.0.0.1'
result = self.client_post_request('/notify_tornado', req)
self.assert_json_error(result, 'Access denied', status_code=403)
post_data['secret'] = settings.SHARED_SECRET
req = POSTRequestMock(post_data, user_profile=None)
req.META['REMOTE_ADDR'] = '127.0.0.1'
result = self.client_post_request('/notify_tornado', req)
self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
def tornado_call(self, view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
user_profile: UserProfile,
post_data: Dict[str, Any]) -> HttpResponse:
request = POSTRequestMock(post_data, user_profile)
return view_func(request, user_profile)
def test_get_events(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.email
recipient_user_profile = self.example_user('othello')
recipient_email = recipient_user_profile.email
self.login_user(user_profile)
result = self.tornado_call(get_events, user_profile,
{"apply_markdown": ujson.dumps(True),
"client_gravatar": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
recipient_result = self.tornado_call(get_events, recipient_user_profile,
{"apply_markdown": ujson.dumps(True),
"client_gravatar": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(recipient_result)
recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0)
local_id = '10.01'
check_send_message(
sender=user_profile,
client=get_client('whatever'),
message_type_name='private',
message_to=[recipient_email],
topic_name=None,
message_content='hello',
local_id=local_id,
sender_queue_id=queue_id,
)
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
last_event_id = events[0]["id"]
local_id = '10.02'
check_send_message(
sender=user_profile,
client=get_client('whatever'),
message_type_name='private',
message_to=[recipient_email],
topic_name=None,
message_content='hello',
local_id=local_id,
sender_queue_id=queue_id,
)
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": last_event_id,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
# Test that the received message in the receiver's event queue
# exists and does not contain a local id
recipient_result = self.tornado_call(get_events, recipient_user_profile,
{"queue_id": recipient_queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
recipient_events = ujson.loads(recipient_result.content)["events"]
self.assert_json_success(recipient_result)
self.assertEqual(len(recipient_events), 2)
self.assertEqual(recipient_events[0]["type"], "message")
self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[0])
self.assertEqual(recipient_events[1]["type"], "message")
self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[1])
def test_get_events_narrow(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
def get_message(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
result = self.tornado_call(
get_events,
user_profile,
dict(
apply_markdown=ujson.dumps(apply_markdown),
client_gravatar=ujson.dumps(client_gravatar),
event_types=ujson.dumps(["message"]),
narrow=ujson.dumps([["stream", "denmark"]]),
user_client="website",
dont_block=ujson.dumps(True),
),
)
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0)
self.send_personal_message(user_profile, self.example_user("othello"), "hello")
self.send_stream_message(user_profile, "Denmark", "**hello**")
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
return events[0]['message']
message = get_message(apply_markdown=False, client_gravatar=False)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "**hello**")
self.assertTrue(message["avatar_url"].startswith("https://secure.gravatar.com"))
message = get_message(apply_markdown=True, client_gravatar=False)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
self.assertIn('gravatar.com', message["avatar_url"])
message = get_message(apply_markdown=False, client_gravatar=True)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "**hello**")
self.assertEqual(message["avatar_url"], None)
message = get_message(apply_markdown=True, client_gravatar=True)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
self.assertEqual(message["avatar_url"], None)
class FetchInitialStateDataTest(ZulipTestCase):
# Non-admin users don't have access to all bots
def test_realm_bots_non_admin(self) -> None:
user_profile = self.example_user('cordelia')
self.assertFalse(user_profile.is_realm_admin)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
self.assert_length(result['realm_bots'], 0)
# additionally the API key for a random bot is not present in the data
api_key = get_api_key(self.notification_bot())
self.assertNotIn(api_key, str(result))
# Admin users have access to all bots in the realm_bots field
def test_realm_bots_admin(self) -> None:
user_profile = self.example_user('hamlet')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
self.assertTrue(user_profile.is_realm_admin)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
self.assertTrue(len(result['realm_bots']) > 2)
def test_max_message_id_with_no_history(self) -> None:
user_profile = self.example_user('aaron')
# Delete all historical messages for this user
UserMessage.objects.filter(user_profile=user_profile).delete()
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
self.assertEqual(result['max_message_id'], -1)
def test_delivery_email_presence_for_non_admins(self) -> None:
user_profile = self.example_user('aaron')
self.assertFalse(user_profile.is_realm_admin)
do_set_realm_property(user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
for key, value in result['raw_users'].items():
self.assertNotIn('delivery_email', value)
do_set_realm_property(user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
for key, value in result['raw_users'].items():
self.assertNotIn('delivery_email', value)
def test_delivery_email_presence_for_admins(self) -> None:
user_profile = self.example_user('iago')
self.assertTrue(user_profile.is_realm_admin)
do_set_realm_property(user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
for key, value in result['raw_users'].items():
self.assertNotIn('delivery_email', value)
do_set_realm_property(user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False, user_avatar_url_field_optional=False)
for key, value in result['raw_users'].items():
self.assertIn('delivery_email', value)
def test_user_avatar_url_field_optional(self) -> None:
hamlet = self.example_user('hamlet')
users = [
self.example_user('iago'),
self.example_user('cordelia'),
self.example_user('ZOE'),
self.example_user('othello'),
]
for user in users:
user.long_term_idle = True
user.save()
long_term_idle_users_ids = [user.id for user in users]
result = fetch_initial_state_data(user_profile=hamlet,
event_types=None,
queue_id='',
client_gravatar=False,
user_avatar_url_field_optional=True)
raw_users = result['raw_users']
for user_dict in raw_users.values():
if user_dict['user_id'] in long_term_idle_users_ids:
self.assertFalse('avatar_url' in user_dict)
else:
self.assertIsNotNone(user_dict['avatar_url'])
gravatar_users_id = [user_dict['user_id'] for user_dict in raw_users.values()
if 'avatar_url' in user_dict and 'gravatar.com' in user_dict['avatar_url']]
# Test again with client_gravatar = True
result = fetch_initial_state_data(user_profile=hamlet,
event_types=None,
queue_id='',
client_gravatar=True,
user_avatar_url_field_optional=True)
raw_users = result['raw_users']
for user_dict in raw_users.values():
if user_dict['user_id'] in gravatar_users_id:
self.assertIsNone(user_dict['avatar_url'])
else:
self.assertFalse('avatar_url' in user_dict)
class ClientDescriptorsTest(ZulipTestCase):
def test_get_client_info_for_all_public_streams(self) -> None:
hamlet = self.example_user('hamlet')
realm = hamlet.realm
queue_data = dict(
all_public_streams=True,
apply_markdown=True,
client_gravatar=True,
client_type_name='website',
event_types=['message'],
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
)
client_info = get_client_info_for_message_event(
message_event,
users=[],
)
self.assertEqual(len(client_info), 1)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['client'].apply_markdown, True)
self.assertEqual(dct['client'].client_gravatar, True)
self.assertEqual(dct['client'].user_profile_id, hamlet.id)
self.assertEqual(dct['flags'], [])
self.assertEqual(dct['is_sender'], False)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
sender_queue_id=client.event_queue.id,
)
client_info = get_client_info_for_message_event(
message_event,
users=[],
)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['is_sender'], True)
def test_get_client_info_for_normal_users(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
realm = hamlet.realm
def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
clear_client_event_queues_for_testing()
queue_data = dict(
all_public_streams=False,
apply_markdown=apply_markdown,
client_gravatar=client_gravatar,
client_type_name='website',
event_types=['message'],
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
)
client_info = get_client_info_for_message_event(
message_event,
users=[
dict(id=cordelia.id),
],
)
self.assertEqual(len(client_info), 0)
client_info = get_client_info_for_message_event(
message_event,
users=[
dict(id=cordelia.id),
dict(id=hamlet.id, flags=['mentioned']),
],
)
self.assertEqual(len(client_info), 1)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['client'].apply_markdown, apply_markdown)
self.assertEqual(dct['client'].client_gravatar, client_gravatar)
self.assertEqual(dct['client'].user_profile_id, hamlet.id)
self.assertEqual(dct['flags'], ['mentioned'])
self.assertEqual(dct['is_sender'], False)
test_get_info(apply_markdown=False, client_gravatar=False)
test_get_info(apply_markdown=True, client_gravatar=False)
test_get_info(apply_markdown=False, client_gravatar=True)
test_get_info(apply_markdown=True, client_gravatar=True)
def test_process_message_event_with_mocked_client_info(self) -> None:
hamlet = self.example_user("hamlet")
class MockClient:
def __init__(self, user_profile_id: int,
apply_markdown: bool,
client_gravatar: bool) -> None:
self.user_profile_id = user_profile_id
self.apply_markdown = apply_markdown
self.client_gravatar = client_gravatar
self.client_type_name = 'whatever'
self.events: List[Dict[str, Any]] = []
def accepts_messages(self) -> bool:
return True
def accepts_event(self, event: Dict[str, Any]) -> bool:
assert event['type'] == 'message'
return True
def add_event(self, event: Dict[str, Any]) -> None:
self.events.append(event)
client1 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=True,
client_gravatar=False,
)
client2 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=False,
client_gravatar=False,
)
client3 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=True,
client_gravatar=True,
)
client4 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=False,
client_gravatar=True,
)
client_info = {
'client:1': dict(
client=client1,
flags=['starred'],
),
'client:2': dict(
client=client2,
flags=['has_alert_word'],
),
'client:3': dict(
client=client3,
flags=[],
),
'client:4': dict(
client=client4,
flags=[],
),
}
sender = hamlet
message_event = dict(
message_dict=dict(
id=999,
content='**hello**',
rendered_content='<b>hello</b>',
sender_id=sender.id,
type='stream',
client='website',
# NOTE: Some of these fields are clutter, but some
# will be useful when we let clients specify
# that they can compute their own gravatar URLs.
sender_email=sender.email,
sender_delivery_email=sender.delivery_email,
sender_realm_id=sender.realm_id,
sender_avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
sender_avatar_version=1,
sender_is_mirror_dummy=None,
recipient_type=None,
recipient_type_id=None,
),
)
# Setting users to `[]` bypasses code we don't care about
# for this test--we assume client_info is correct in our mocks,
# and we are interested in how messages are put on event queue.
users: List[Dict[str, Any]] = []
with mock.patch('zerver.tornado.event_queue.get_client_info_for_message_event',
return_value=client_info):
process_message_event(message_event, users)
# We are not closely examining avatar_url at this point, so
# just sanity check them and then delete the keys so that
# upcoming comparisons work.
for client in [client1, client2]:
message = client.events[0]['message']
self.assertIn('gravatar.com', message['avatar_url'])
message.pop('avatar_url')
self.assertEqual(client1.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
id=999,
content='<b>hello</b>',
content_type='text/html',
client='website',
),
flags=['starred'],
),
])
self.assertEqual(client2.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
id=999,
content='**hello**',
content_type='text/x-markdown',
client='website',
),
flags=['has_alert_word'],
),
])
self.assertEqual(client3.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
avatar_url=None,
id=999,
content='<b>hello</b>',
content_type='text/html',
client='website',
),
flags=[],
),
])
self.assertEqual(client4.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
avatar_url=None,
id=999,
content='**hello**',
content_type='text/x-markdown',
client='website',
),
flags=[],
),
])
class FetchQueriesTest(ZulipTestCase):
def test_queries(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
flush_per_request_caches()
with queries_captured() as queries:
with mock.patch('zerver.lib.events.always_want') as want_mock:
fetch_initial_state_data(
user_profile=user,
event_types=None,
queue_id='x',
client_gravatar=False,
user_avatar_url_field_optional=False
)
self.assert_length(queries, 30)
expected_counts = dict(
alert_words=1,
custom_profile_fields=1,
default_streams=1,
default_stream_groups=1,
hotspots=0,
message=1,
muted_topics=1,
presence=1,
realm=0,
realm_bot=1,
realm_domains=1,
realm_embedded_bots=0,
realm_incoming_webhook_bots=0,
realm_emoji=1,
realm_filters=1,
realm_user=3,
realm_user_groups=2,
recent_private_conversations=1,
starred_messages=1,
stream=2,
stop_words=0,
subscription=5,
update_display_settings=0,
update_global_notifications=0,
update_message_flags=5,
user_status=1,
video_calls=0,
)
wanted_event_types = {
item[0][0] for item
in want_mock.call_args_list
}
self.assertEqual(wanted_event_types, set(expected_counts))
for event_type in sorted(wanted_event_types):
count = expected_counts[event_type]
flush_per_request_caches()
with queries_captured() as queries:
if event_type == 'update_message_flags':
event_types = ['update_message_flags', 'message']
else:
event_types = [event_type]
fetch_initial_state_data(
user_profile=user,
event_types=event_types,
queue_id='x',
client_gravatar=False,
user_avatar_url_field_optional=False
)
self.assert_length(queries, count)
class TestEventsRegisterAllPublicStreamsDefaults(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
def test_use_passed_all_public_true_default_false(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_true_default(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_false_default_false(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_passed_all_public_false_default_true(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_true_default_for_none(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertTrue(result)
def test_use_false_default_for_none(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertFalse(result)
class TestEventsRegisterNarrowDefaults(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
self.stream = get_stream('Verona', self.user_profile.realm)
def test_use_passed_narrow_no_default(self) -> None:
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [['stream', 'my_stream']])
self.assertEqual(result, [['stream', 'my_stream']])
def test_use_passed_narrow_with_default(self) -> None:
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [['stream', 'my_stream']])
self.assertEqual(result, [['stream', 'my_stream']])
def test_use_default_if_narrow_is_empty(self) -> None:
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [['stream', 'Verona']])
def test_use_narrow_if_default_is_none(self) -> None:
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [])
class TestGetRawUserDataSystemBotRealm(ZulipTestCase):
def test_get_raw_user_data_on_system_bot_realm(self) -> None:
result = get_raw_user_data(get_realm("zulipinternal"), self.example_user('hamlet'),
client_gravatar=True, user_avatar_url_field_optional=True)
for bot_email in settings.CROSS_REALM_BOT_EMAILS:
bot_profile = get_system_bot(bot_email)
self.assertTrue(bot_profile.id in result)
self.assertTrue(result[bot_profile.id]['is_cross_realm_bot'])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
========================================================================================
This small script subscribes to the FeedbackMsg message of teb_local_planner
and converts the current scene to an SVG image.
publish_feedback must be turned on so that the planner publishes this information.
Author: christoph.roesmann@tu-dortmund.de
It is recommended to start this node after initialization of TEB is completed.
Requirements:
svgwrite: A Python library to create SVG drawings. http://pypi.python.org/pypi/svgwrite
========================================================================================
"""
import roslib
import sys
import rospy
import svgwrite
import math
import time
import random
from svgwrite import cm, mm
from teb_local_planner.msg import FeedbackMsg, TrajectoryMsg, TrajectoryPointMsg
from geometry_msgs.msg import PolygonStamped, Point32, Quaternion
# ================= PARAMETERS ==================
# TODO: In case of a more general node, change parameter to ros-parameter
# Drawing parameters:
SCALE = 200 # Overall scaling: 100 pixel = 1 m
MIN_POSE_DISTANCE = 0.3 # Distance between two consecutive poses in SVG-image
SCALE_VELOCITY_VEC = 0.4 # Scaling of velocity vectors -> 1 cell = 1/SCALE_VELOCITY_VEC m/s
GRID_X_MIN = -2 # Define, how many cells your grid should contain in each direction.
GRID_X_MAX = 2
GRID_Y_MIN = -2
GRID_Y_MAX = 1
# TEB parameters:
OBSTACLE_DIST = 50 *SCALE/100 # cm
# ================= FUNCTIONS ===================
def sign(number):
"""
Signum function: get sign of a number
@param number: get sign of this number
@type number: numeric type (eg. integer)
@return: sign of number
@rtype: integer {1, -1, 0}
"""
# cmp() was removed in Python 3; this comparison trick is equivalent
return (number > 0) - (number < 0)
def arrowMarker(color='green', orientation='auto'):
"""
Create an arrow marker with svgwrite
@return: arrow marker
@rtype: svg_write marker object
"""
arrow = svg.marker(insert=(1,5), size=(4,3), orient=orientation)
arrow.viewbox(width=10, height=10)
arrow.add(svg.polyline([(0,0),(10,5),(0,10),(1,5)], fill=color, opacity=1.0))
svg.defs.add(arrow)
return arrow
def quaternion2YawDegree(orientation):
"""
Get yaw angle [degree] from quaternion representation
@param orientation: orientation in quaternions to read from
@type orientation: geometry_msgs/Quaternion
@return: yaw angle [degree]
@rtype: float
"""
yawRad = math.atan2(2*(orientation.x*orientation.y+orientation.z*orientation.w),1-2*(pow(orientation.y,2)+pow(orientation.z,2)))
return yawRad*180/math.pi
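# Quick sanity check (illustrative): the identity quaternion has yaw 0, and a
# 90 degree rotation about z (x=y=0, z=w=sqrt(0.5)) yields ~90 degrees:
#
#   q = Quaternion(x=0.0, y=0.0, z=0.7071067811865476, w=0.7071067811865476)
#   assert abs(quaternion2YawDegree(q) - 90.0) < 1e-6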
def feedback_callback(data):
"""
Callback for receiving TEB and obstacle information
@param data: Received feedback message
@type data: teb_local_planner/FeedbackMsg
@globalparam feedbackMsg: stores the first received feedback message
@globaltype feedbackMsg: teb_local_planner/FeedbackMsg
"""
# TODO: Remove global variables
global feedbackMsg
if not feedbackMsg:
feedbackMsg = data
rospy.loginfo("TEB feedback message received...")
# ================ MAIN FUNCTION ================
if __name__ == '__main__':
rospy.init_node('export_to_svg', anonymous=True)
topic_name = "/test_optim_node/teb_feedback" # define feedback topic here!
rospy.Subscriber(topic_name, FeedbackMsg, feedback_callback, queue_size = 1)
rospy.loginfo("Waiting for feedback message on topic %s.", topic_name)
rate = rospy.Rate(10.0)
feedbackMsg = []
timestr = time.strftime("%Y%m%d_%H%M%S")
filename_string = "teb_svg_" + timestr + '.svg'
rospy.loginfo("SVG will be written to '%s'.", filename_string)
random.seed(0)
svg=svgwrite.Drawing(filename=filename_string, debug=True)
# Create viewbox -> this box defines the size of the visible drawing
svg.viewbox(GRID_X_MIN*SCALE-1*SCALE,GRID_Y_MIN*SCALE-1*SCALE,GRID_X_MAX*SCALE-GRID_X_MIN*SCALE+2*SCALE,GRID_Y_MAX*SCALE-GRID_Y_MIN*SCALE+2*SCALE)
# Draw grid:
hLines = svg.add(svg.g(id='hLines', stroke='black'))
hLines.add(svg.line(start=(GRID_X_MIN*SCALE, 0), end=(GRID_X_MAX*SCALE, 0)))
for y in range(GRID_Y_MAX):
hLines.add(svg.line(start=(GRID_X_MIN*SCALE, SCALE+y*SCALE), end=(GRID_X_MAX*SCALE, SCALE+y*SCALE)))
for y in range(-GRID_Y_MIN):
hLines.add(svg.line(start=(GRID_X_MIN*SCALE, -SCALE-y*SCALE), end=(GRID_X_MAX*SCALE, -SCALE-y*SCALE)))
vLines = svg.add(svg.g(id='vline', stroke='black'))
vLines.add(svg.line(start=(0, GRID_Y_MIN*SCALE), end=(0, GRID_Y_MAX*SCALE)))
for x in range(GRID_X_MAX):
vLines.add(svg.line(start=(SCALE+x*SCALE, GRID_Y_MIN*SCALE), end=(SCALE+x*SCALE, GRID_Y_MAX*SCALE)))
for x in range(-GRID_X_MIN):
vLines.add(svg.line(start=(-SCALE-x*SCALE, GRID_Y_MIN*SCALE), end=(-SCALE-x*SCALE, GRID_Y_MAX*SCALE)))
# Draw legend:
legend = svg.g(id='legend', font_size=25)
stringGeometry = "Geometry: 1 Unit = 1.0m"
legendGeometry = svg.text(stringGeometry)
legend.add(legendGeometry)
legend.translate(tx=GRID_X_MIN*SCALE, ty=GRID_Y_MAX*SCALE + 30) # Move legend to bottom left corner
svg.add(legend)
#arrow = arrowMarker() # Init arrow marker
rospy.loginfo("Initialization completed.\nWaiting for feedback message...")
# -------------------- WAIT FOR CALLBACKS --------------------------
while not rospy.is_shutdown():
if feedbackMsg:
break # Leave loop after receiving all necessary TEB information (see callbacks) to finish drawing
rate.sleep()
# ------------------------------------------------------------------
if not feedbackMsg.trajectories:
rospy.loginfo("Received message does not contain trajectories. Shutting down...")
sys.exit()
if len(feedbackMsg.trajectories[0].trajectory) < 2:
rospy.loginfo("Received message does not contain trajectories with at least two states (start and goal). Shutting down...")
sys.exit()
# iterate trajectories
for index, traj in enumerate(feedbackMsg.trajectories):
#color
traj_color = svgwrite.rgb(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 'RGB')
# Iterate through TEB positions -> Draw Paths
points = []
for point in traj.trajectory:
points.append( (point.pose.position.x*SCALE,-point.pose.position.y*SCALE) ) # y is negative in image coordinates
# svgwrite rotates clockwise!
if index == feedbackMsg.selected_trajectory_idx: # highlight currently selected teb
line = svg.add( svg.polyline(points=points, fill='none', stroke=traj_color, stroke_width=10, stroke_linecap='round', \
stroke_linejoin='round', opacity=1.0 ) )
else:
line = svg.add( svg.polyline(points=points, fill='none', stroke=traj_color, stroke_width=10, stroke_linecap='butt', \
stroke_linejoin='round', stroke_dasharray='10,3', opacity=1.0 ) )
#marker_points = points[::7]
#markerline = svg.add( svg.polyline(points=marker_points, fill='none', stroke=traj_color, stroke_width=10, opacity=0.0 ) )
#arrow = arrowMarker(traj_color)
#markerline.set_markers( (arrow, arrow, arrow) )
#line.set_markers( (arrow, arrow, arrow) )
#line['marker-start'] = arrow.get_funciri()
# Add Start and Goal Point
start_pose = feedbackMsg.trajectories[0].trajectory[0].pose
goal_pose = feedbackMsg.trajectories[0].trajectory[len(feedbackMsg.trajectories[0].trajectory)-1].pose
start_position = start_pose.position
goal_position = goal_pose.position
svg.add(svg.circle(center=(start_position.x*SCALE,-start_position.y*SCALE), r=10, stroke_width=1, stroke='blue', fill ='blue'))
svg.add(svg.text("Start", (start_position.x*SCALE-70, -start_position.y*SCALE+45), font_size=35)) # Add label
svg.add(svg.circle(center=(goal_position.x*SCALE,-goal_position.y*SCALE), r=10, stroke_width=1, stroke='red', fill ='red'))
svg.add(svg.text("Goal", (goal_position.x*SCALE-40, -goal_position.y*SCALE+45), font_size=35)) # Add label
# draw start arrow
start_arrow = svg.polyline([(0,-1),(6,-1),(5,-5),(15,0),(5,5),(6,1),(0,1)], fill='blue', opacity=1.0)
start_arrow.translate(start_position.x*SCALE,-start_position.y*SCALE)
start_arrow.rotate( quaternion2YawDegree(start_pose.orientation) )
start_arrow.scale(3)
svg.add(start_arrow)
# draw goal arrow
goal_arrow = svg.polyline([(0,-1),(6,-1),(5,-5),(15,0),(5,5),(6,1),(0,1)], fill='red', opacity=1.0)
goal_arrow.translate(goal_position.x*SCALE,-goal_position.y*SCALE)
goal_arrow.rotate( quaternion2YawDegree(goal_pose.orientation) )
goal_arrow.scale(3)
svg.add(goal_arrow)
# Draw obstacles
for obstacle in feedbackMsg.obstacles:
if len(obstacle.polygon.points) == 1: # point obstacle
point = obstacle.polygon.points[0]
svg.add(svg.circle(center=(point.x*SCALE,-point.y*SCALE), r=OBSTACLE_DIST, stroke_width=1, stroke='grey', fill ='grey', opacity=0.3))
svg.add(svg.circle(center=(point.x*SCALE,-point.y*SCALE), r=15, stroke_width=1, stroke='black', fill ='black'))
svg.add(svg.text("Obstacle", (point.x*SCALE-70, -point.y*SCALE+45), font_size=35)) # Add label
if len(obstacle.polygon.points) == 2: # line obstacle
line_start = obstacle.polygon.points[0]
line_end = obstacle.polygon.points[1]
svg.add(svg.line(start=(line_start.x*SCALE,-line_start.y*SCALE), end=(line_end.x*SCALE,-line_end.y*SCALE), stroke='black', fill='gray', stroke_width=1, opacity=1.0))
svg.add(svg.text("Obstacle", (line_start.x*SCALE-70, -line_start.y*SCALE+45), font_size=35)) # Add label
if len(obstacle.polygon.points) > 2: # polygon obstacle
vertices = []
for point in obstacle.polygon.points:
vertices.append((point.x*SCALE, -point.y*SCALE))
svg.add(svg.polygon(points=vertices, stroke='black', fill='gray', stroke_width=1, opacity=1.0))
svg.add(svg.text("Obstacle", (obstacle.polygon.points[0].x*SCALE-70, -obstacle.polygon.points.y*SCALE+45), font_size=35)) # Add label
# Save svg to file (svg_output.svg) and exit node
svg.save()
rospy.loginfo("Drawing completed.")
| |
# -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
from __future__ import absolute_import
import re
import os
import sys
import optparse
import time
from os import path
from io import open
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
except ImportError:
pass
from six import PY2, PY3, text_type, binary_type
from six.moves import input
from six.moves.urllib.parse import quote as urlquote
from docutils.utils import column_width
from sphinx import __display_version__, package_dir
from sphinx.util.osutil import make_filename
from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util.template import SphinxRenderer
from sphinx.util import texescape
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
DEFAULT_VALUE = {
'path': '.',
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'epub': False,
'ext_autodoc': False,
'ext_doctest': False,
'ext_todo': False,
'makefile': True,
'batchfile': True,
}
EXTENSIONS = ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
'imgmath', 'mathjax', 'ifconfig', 'viewcode', 'githubpages')
PROMPT_PREFIX = '> '
def mkdir_p(dir):
if path.isdir(dir):
return
os.makedirs(dir)
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
print(prompt, end='')
return input('')
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x):
x = path.expanduser(x)
if path.exists(x) and not path.isdir(x):
raise ValidationError("Please enter a valid path name.")
return x
def allow_empty(x):
return x
def nonempty(x):
if not x:
raise ValidationError("Please enter some text.")
return x
def choice(*options):
def val(x):
if x not in options:
raise ValidationError('Please enter one of %s.' % ', '.join(options))
return x
return val
def boolean(x):
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError("Please enter either 'y' or 'n'.")
return x.upper() in ('Y', 'YES')
def suffix(x):
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError("Please enter a file suffix, "
"e.g. '.rst' or '.txt'.")
return x
def ok(x):
return x
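# Example behaviour of the validators above (illustrative):
#
#   boolean('y') -> True, boolean('NO') -> False
#   suffix('.rst') -> '.rst' (anything without a leading dot raises)
#   choice('a', 'b')('c') raises ValidationError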
def term_decode(text):
if isinstance(text, text_type):
return text
# for Python 2.x, try to get a Unicode string out of it
if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
return text
if TERM_ENCODING:
text = text.decode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII characters entered '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = text.decode('latin1')
return text
def do_prompt(d, key, text, default=None, validator=nonempty):
while True:
if default is not None:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if PY2:
# for Python 2.x, try to get a Unicode string out of it
if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
!= prompt:
if TERM_ENCODING:
prompt = prompt.encode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII default value provided '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
prompt = prompt.encode('utf-8')
except UnicodeEncodeError:
prompt = prompt.encode('latin1')
prompt = purple(prompt)
x = term_input(prompt).strip()
if default and not x:
x = default
x = term_decode(x)
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
d[key] = x
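# Illustrative use of do_prompt (a sketch; the key and values shown are
# examples only):
#
#   d = {}
#   do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
#   # d['suffix'] now holds a validated suffix such as '.rst'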
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
# remove Unicode literal prefixes
if PY3:
return rex.sub('\\1', source)
else:
return source
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
self.templatedir = templatedir or ''
super(QuickstartRenderer, self).__init__()
def render(self, template_name, context):
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
else:
return super(QuickstartRenderer, self).render(template_name, context)
def ask_user(d):
"""Ask the user for quickstart values missing from *d*.
Values are:
* path: root path
* sep: separate source and build dirs (bool)
* dot: replacement for dot in _templates etc.
* project: project name
* author: author names
* version: version of project
* release: release of project
* language: document language
* suffix: source file suffix
* master: master document name
* epub: use epub (bool)
* ext_*: extensions to use (bools)
* makefile: make Makefile
* batchfile: make command file
"""
print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
print('''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).''')
if 'path' in d:
print(bold('''
Selected root path: %s''' % d['path']))
else:
print('''
Enter the root path for documentation.''')
do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print()
print(bold('Error: an existing conf.py has been found in the '
'selected root path.'))
print('sphinx-quickstart will not overwrite existing Sphinx projects.')
print()
do_prompt(d, 'path', 'Please enter a new root path (or just Enter '
'to exit)', '', is_path)
if not d['path']:
sys.exit(1)
if 'sep' not in d:
print('''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory "_build" within the root path, or you separate
"source" and "build" directories within the root path.''')
do_prompt(d, 'sep', 'Separate source and build directories (y/n)', 'n',
boolean)
if 'dot' not in d:
print('''
Inside the root directory, two more directories will be created; "_templates"
for custom HTML templates and "_static" for custom stylesheets and other static
files. You can enter another prefix (such as ".") to replace the underscore.''')
do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok)
if 'project' not in d:
print('''
The project name will occur in several places in the built documentation.''')
do_prompt(d, 'project', 'Project name')
if 'author' not in d:
do_prompt(d, 'author', 'Author name(s)')
if 'version' not in d:
print('''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.''')
do_prompt(d, 'version', 'Project version', '', allow_empty)
if 'release' not in d:
do_prompt(d, 'release', 'Project release', d['version'], allow_empty)
if 'language' not in d:
print('''
If the documents are to be written in a language other than English,
you can select a language here by its language code. Sphinx will then
translate text that it generates into that language.
For a list of supported codes, see
http://sphinx-doc.org/config.html#confval-language.''')
do_prompt(d, 'language', 'Project language', 'en')
if d['language'] == 'en':
d['language'] = None
if 'suffix' not in d:
print('''
The file name suffix for source files. Commonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.''')
do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
if 'master' not in d:
print('''
One document is special in that it is considered the top node of the
"contents tree", that is, it is the root of the hierarchical structure
of the documents. Normally, this is "index", but if your "index"
document is a custom template, you can also set this to another filename.''')
do_prompt(d, 'master', 'Name of your master document (without suffix)',
'index')
while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print()
print(bold('Error: the master file %s has already been found in the '
'selected root path.' % (d['master'] + d['suffix'])))
print('sphinx-quickstart will not overwrite the existing file.')
print()
do_prompt(d, 'master', 'Please enter a new file name, or rename the '
'existing file and press Enter', d['master'])
if 'epub' not in d:
print('''
Sphinx can also add configuration for epub output:''')
do_prompt(d, 'epub', 'Do you want to use the epub builder (y/n)',
'n', boolean)
if 'ext_autodoc' not in d:
print('''
Please indicate if you want to use one of the following Sphinx extensions:''')
do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
'from modules (y/n)', 'n', boolean)
if 'ext_doctest' not in d:
do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
'in doctest blocks (y/n)', 'n', boolean)
if 'ext_intersphinx' not in d:
do_prompt(d, 'ext_intersphinx', 'intersphinx: link between Sphinx '
'documentation of different projects (y/n)', 'n', boolean)
if 'ext_todo' not in d:
do_prompt(d, 'ext_todo', 'todo: write "todo" entries '
'that can be shown or hidden on build (y/n)', 'n', boolean)
if 'ext_coverage' not in d:
do_prompt(d, 'ext_coverage', 'coverage: checks for documentation '
'coverage (y/n)', 'n', boolean)
if 'ext_imgmath' not in d:
do_prompt(d, 'ext_imgmath', 'imgmath: include math, rendered '
'as PNG or SVG images (y/n)', 'n', boolean)
if 'ext_mathjax' not in d:
do_prompt(d, 'ext_mathjax', 'mathjax: include math, rendered in the '
'browser by MathJax (y/n)', 'n', boolean)
if d['ext_imgmath'] and d['ext_mathjax']:
print('''Note: imgmath and mathjax cannot be enabled at the same time.
imgmath has been deselected.''')
d['ext_imgmath'] = False
if 'ext_ifconfig' not in d:
do_prompt(d, 'ext_ifconfig', 'ifconfig: conditional inclusion of '
'content based on config values (y/n)', 'n', boolean)
if 'ext_viewcode' not in d:
do_prompt(d, 'ext_viewcode', 'viewcode: include links to the source '
'code of documented Python objects (y/n)', 'n', boolean)
if 'ext_githubpages' not in d:
do_prompt(d, 'ext_githubpages', 'githubpages: create .nojekyll file '
'to publish the document on GitHub pages (y/n)', 'n', boolean)
if 'no_makefile' in d:
d['makefile'] = False
elif 'makefile' not in d:
print('''
A Makefile and a Windows command file can be generated for you so that you
only have to run e.g. `make html' instead of invoking sphinx-build
directly.''')
do_prompt(d, 'makefile', 'Create Makefile? (y/n)', 'y', boolean)
if 'no_batchfile' in d:
d['batchfile'] = False
elif 'batchfile' not in d:
do_prompt(d, 'batchfile', 'Create Windows command file? (y/n)',
'y', boolean)
print()
def generate(d, overwrite=True, silent=False, templatedir=None):
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
texescape.init()
indent = ' ' * 4
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
d['PY3'] = PY3
d['project_fn'] = make_filename(d['project'])
d['project_url'] = urlquote(d['project'].encode('idna'))
d['project_manpage'] = d['project_fn'].lower()
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d.setdefault('extensions', [])
for name in EXTENSIONS:
if d.get('ext_' + name):
d['extensions'].append('sphinx.ext.' + name)
d['extensions'] = (',\n' + indent).join(repr(name) for name in d['extensions'])
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d['author_texescaped'] = text_type(d['author']).\
translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
translate(texescape.tex_escape_map)
# escape backslashes and single quotes in strings that are put into
# a Python string literal
for key in ('project', 'project_doc', 'project_doc_texescaped',
'author', 'author_texescaped', 'copyright',
'version', 'release', 'master'):
d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
if not path.isdir(d['path']):
mkdir_p(d['path'])
srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
mkdir_p(srcdir)
if d['sep']:
builddir = path.join(d['path'], 'build')
d['exclude_patterns'] = ''
else:
builddir = path.join(srcdir, d['dot'] + 'build')
exclude_patterns = map(repr, [
d['dot'] + 'build',
'Thumbs.db', '.DS_Store',
])
d['exclude_patterns'] = ', '.join(exclude_patterns)
mkdir_p(builddir)
mkdir_p(path.join(srcdir, d['dot'] + 'templates'))
mkdir_p(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
if overwrite or not path.isfile(fpath):
print('Creating file %s.' % fpath)
with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
f.write(content)
else:
print('File %s already exists, skipping.' % fpath)
conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
if not conf_path or not path.isfile(conf_path):
conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
with open(conf_path) as f:
conf_text = convert_python_source(f.read())
write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
masterfile = path.join(srcdir, d['master'] + d['suffix'])
write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
if d.get('make_mode') is True:
makefile_template = 'quickstart/Makefile.new_t'
batchfile_template = 'quickstart/make.bat.new_t'
else:
makefile_template = 'quickstart/Makefile_t'
batchfile_template = 'quickstart/make.bat_t'
if d['makefile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
write_file(path.join(d['path'], 'Makefile'),
template.render(makefile_template, d), u'\n')
if d['batchfile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'),
template.render(batchfile_template, d), u'\r\n')
if silent:
return
print()
print(bold('Finished: An initial directory structure has been created.'))
print('''
You should now populate your master file %s and create other documentation
source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
Use the Makefile to build the docs, like so:
make builder
''' or '''\
Use the sphinx-build command to build the docs, like so:
sphinx-build -b builder %s %s
''' % (srcdir, builddir)) + '''\
where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
''')
def usage(argv, msg=None):
if msg:
print(msg, file=sys.stderr)
print(file=sys.stderr)
USAGE = """\
Sphinx v%s
Usage: %%prog [options] [projectdir]
""" % __display_version__
EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
def valid_dir(d):
dir = d['path']
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):
return False
if d['sep']:
        dir = os.path.join(dir, 'source')
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
reserved_names = [
'conf.py',
d['dot'] + 'static',
d['dot'] + 'templates',
d['master'] + d['suffix'],
]
if set(reserved_names) & set(os.listdir(dir)):
return False
return True
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage):
return usage
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
def main(argv=sys.argv):
if not color_terminal():
nocolor()
parser = optparse.OptionParser(USAGE, epilog=EPILOG,
version='Sphinx v%s' % __display_version__,
formatter=MyFormatter())
parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
default=False,
help='quiet mode')
group = parser.add_option_group('Structure options')
group.add_option('--sep', action='store_true', dest='sep',
help='if specified, separate source and build dirs')
group.add_option('--dot', metavar='DOT', dest='dot',
help='replacement for dot in _templates etc.')
group = parser.add_option_group('Project basic options')
group.add_option('-p', '--project', metavar='PROJECT', dest='project',
help='project name')
group.add_option('-a', '--author', metavar='AUTHOR', dest='author',
help='author names')
group.add_option('-v', metavar='VERSION', dest='version',
help='version of project')
group.add_option('-r', '--release', metavar='RELEASE', dest='release',
help='release of project')
group.add_option('-l', '--language', metavar='LANGUAGE', dest='language',
help='document language')
group.add_option('--suffix', metavar='SUFFIX', dest='suffix',
help='source file suffix')
group.add_option('--master', metavar='MASTER', dest='master',
help='master document name')
group.add_option('--epub', action='store_true', dest='epub',
default=False,
help='use epub')
group = parser.add_option_group('Extension options')
for ext in EXTENSIONS:
group.add_option('--ext-' + ext, action='store_true',
dest='ext_' + ext, default=False,
help='enable %s extension' % ext)
group.add_option('--extensions', metavar='EXTENSIONS', dest='extensions',
action='append', help='enable extensions')
group = parser.add_option_group('Makefile and Batchfile creation')
group.add_option('--makefile', action='store_true', dest='makefile',
default=False,
help='create makefile')
group.add_option('--no-makefile', action='store_true', dest='no_makefile',
default=False,
                     help='do not create makefile')
group.add_option('--batchfile', action='store_true', dest='batchfile',
default=False,
help='create batchfile')
group.add_option('--no-batchfile', action='store_true', dest='no_batchfile',
default=False,
                     help='do not create batchfile')
group.add_option('-M', '--no-use-make-mode', action='store_false', dest='make_mode',
                     help='do not use make-mode for Makefile/make.bat')
group.add_option('-m', '--use-make-mode', action='store_true', dest='make_mode',
default=True,
help='use make-mode for Makefile/make.bat')
group = parser.add_option_group('Project templating')
group.add_option('-t', '--templatedir', metavar='TEMPLATEDIR', dest='templatedir',
help='template directory for template files')
group.add_option('-d', metavar='NAME=VALUE', action='append', dest='variables',
help='define a template variable')
# parse options
try:
opts, args = parser.parse_args(argv[1:])
except SystemExit as err:
return err.code
if len(args) > 0:
opts.ensure_value('path', args[0])
d = vars(opts)
# delete None or False value
d = dict((k, v) for k, v in d.items() if not (v is None or v is False))
try:
if 'quiet' in d:
if not set(['project', 'author']).issubset(d):
print('''"quiet" is specified, but any of "project" or \
"author" is not specified.''')
return 1
if set(['quiet', 'project', 'author']).issubset(d):
# quiet mode with all required params satisfied, use default
d.setdefault('version', '')
d.setdefault('release', d['version'])
d2 = DEFAULT_VALUE.copy()
d2.update(dict(("ext_" + ext, False) for ext in EXTENSIONS))
d2.update(d)
d = d2
if 'no_makefile' in d:
d['makefile'] = False
if 'no_batchfile' in d:
d['batchfile'] = False
if not valid_dir(d):
print()
print(bold('Error: specified path is not a directory, or sphinx'
' files already exist.'))
                print('sphinx-quickstart only generates into an empty directory.'
' Please specify a new root path.')
return 1
else:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return 130 # 128 + SIGINT
# decode values in d if value is a Python string literal
for key, value in d.items():
if isinstance(value, binary_type):
d[key] = term_decode(value)
# parse extensions list
d.setdefault('extensions', [])
for ext in d['extensions'][:]:
if ',' in ext:
d['extensions'].remove(ext)
for modname in ext.split(','):
d['extensions'].append(modname)
for variable in d.get('variables', []):
try:
            name, value = variable.split('=', 1)
d[name] = value
except ValueError:
print('Invalid template variable: %s' % variable)
generate(d, templatedir=opts.templatedir)
if __name__ == '__main__':
sys.exit(main(sys.argv))
from google.appengine.ext import db
import lp.error
import lp.time
import re
class Entry(db.Model):
#id = 1
title = db.StringProperty(required = True, default = "(Untitled)", indexed = False)
published = db.DateTimeProperty()
updated = db.DateTimeProperty(auto_now = True)
tags = db.StringListProperty()
content = db.TextProperty()
public = db.BooleanProperty() # None = Draft, True = Public, False = Private
attachments = db.IntegerProperty (default=0, indexed=True)
@classmethod
    def get_by_query(cls, query=None):
        # Avoid the shared mutable default argument pitfall.
        if query is None:
            query = {}
        if 'id' in query:
result = [cls.get_by_id(query['id']), ]
else:
q = cls.all()
if 'order' in query:
q.order(query['order'])
else:
q.order("-published")
if 'public' in query:
q.filter("public = ", query['public'])
if 'tags' in query:
q.filter("tags IN ", query['tags'])
if 'start_time' in query:
q.filter("published >= ", lp.time.changetz(query['start_time'], '+0000'))
if 'end_time' in query:
q.filter("published < ", lp.time.changetz(query['end_time'], '+0000'))
if 'limit' in query:
limit = query['limit']
else:
limit = 25
if 'page' in query:
offset = limit * (query['page'] - 1)
else:
offset = 0
result = q.fetch(limit, offset)
total = q.count()
for i in range(len(result)):
result[i].id = result[i].key().id()
if 'time_offset' in query:
result[i].published = lp.time.changetz(result[i].published, query['time_offset'])
if 'id' in query:
return result[0]
else:
return (result, total)
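    # A usage sketch (values are illustrative): fetch the second page of
    # public entries tagged "Life", 10 per page, shifted to the viewer's
    # timezone; passing {'id': ...} returns a single Entry instead:
    #
    #   entries, total = Entry.get_by_query({
    #       'public': True, 'tags': ['Life'],
    #       'limit': 10, 'page': 2, 'time_offset': '+0800',
    #   })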
@classmethod
def save(cls, entries):
if not isinstance(entries, (list, tuple)):
entries = [entries,]
for i in range(len(entries)):
entries[i].published = lp.time.changetz(entries[i].published, '+0000')
for tag in entries[i].tags:
if ' ' in tag:
raise lp.error.ValidationError("Tag cannot contain spaces.")
# This is just a safety validation. The Handler should be responsible for the validity of tag string.
return db.put(entries)
@classmethod
def delete_by_id(cls, ids):
keys = [db.Key.from_path(cls.kind(), id, parent=None) for id in ids]
db.delete(keys)
@classmethod
def delete(cls, models):
db.delete(models)
class Setting(db.Model):
#key_name = "time_offset"
value = db.StringProperty (required=False, indexed=False)
KEY_LIST = {
# Allowed keys and their default values.
# Don't forget to modify Setting.put() if you change this.
'remote_publish_protocol': 'aws3',
'remote_publish_url': '',
'accept_email_with': '',
'accept_email_from': '',
'time_offset': '+0000'
}
@classmethod
def get_by_keynames(cls, keynames = None):
""" Extended from db.Model.get(). Will create a Setting in its default values if key_name is not found.
Args:
keyname: a string of a key_name, or list of key_names.
Returns:
a Setting instance or a list of Setting instances.
"""
        if keynames is None:
keynames = cls.KEY_LIST.keys()
result = cls.get_by_key_name(keynames)
        try:  # if iterable
            if None in result:  # skip the loop if all key_names are present
                missing = []
                for (offset, key) in enumerate(keynames):  # TODO: possible optimization via a list comprehension?
                    if result[offset] is None:
                        result[offset] = Setting(key_name=key, value=cls.KEY_LIST[key])
                        missing.append(result[offset])
                db.put(missing)
        except TypeError:  # if not iterable
            # TODO: this kind of branching is hard to understand. Consider revising using isinstance()?
            if result is None:
                result = Setting(key_name=str(keynames), value=cls.KEY_LIST[str(keynames)])
                db.put(result)
        return result
return result
@classmethod
def get_in_dict(cls, keynames = None):
"""
Extended from Setting.get_by_keynames(). Returns a Dict of key_name / value pairs."""
# TODO: memcache
        if keynames is None:
keynames = cls.KEY_LIST.keys()
result = cls.get_by_keynames(keynames)
output = {}
try:
iter(result)
except TypeError: # if not iterable
result = [result,]
for item in result:
output[item.key().name()] = item.value
return output
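    # Usage sketch: read every known setting as a plain dict; missing entries
    # are created on the fly with their KEY_LIST defaults:
    #
    #   settings = Setting.get_in_dict()
    #   tz = settings['time_offset']   # '+0000' unless overridden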
@classmethod
def save(cls, settings):
""" Update or create Setting entries after validating values.
        Note: We don't use the db.StringProperty(validator=***) approach because there is no way to attach a validator to key_name.
Args:
settings: a Setting object or a list of Setting objects.
Returns:
a Key object or a List of Key objects of the updated or created entries.
        Raises:
            ValidationError: if the provided Setting.value fails to conform to the formatting rules.
"""
if not isinstance(settings, (list, tuple)):
settings = [settings,]
        for setting in list(settings):  # iterate over a copy, since invalid keynames are removed below
keyname = setting.key().name()
if keyname in cls.KEY_LIST:
#URL check
if keyname == 'remote_publish_url':
                    if setting.value != '' and not re.match(r'^(http|https|ftp|file)://.+', setting.value):
raise lp.error.ValidationError("%s is not a valid URL." % setting.value)
#Email check
elif keyname == 'accept_email_from' or keyname == 'accept_email_with':
for email in setting.value.split(' '):
                        if email != '' and not re.match(r'^[a-zA-Z][\w\.]+@([\w\-]+\.)+[a-zA-Z]{2,7}$', email):
raise lp.error.ValidationError("%s is not a valid email address." % email)
elif keyname == 'time_offset':
                    if not re.match(r'^[\+\-][0-9]{4}', setting.value) or int(setting.value[1:3]) > 23 or int(setting.value[3:5]) > 59:
                        raise lp.error.ValidationError("%s is not a valid time zone format, e.g. between -2359 and +2359" % setting.value)
else: # Drop keynames that are not in KEY_LIST
settings.remove(setting)
        return db.put(settings)
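# Illustrative values that pass Setting.save() validation:
#   remote_publish_url: 'https://example.com/publish' (or empty)
#   accept_email_from:  'alice@example.com bob@example.org' (space separated)
#   time_offset:        '+0800' (must match [+-]HHMM with HH <= 23, MM <= 59)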
class Archive(db.Model):
#key_name = "201009"
count = db.IntegerProperty (default=1, indexed=True)
class Tag(db.Model):
#key_name = "Life"
count = db.IntegerProperty (default=1, indexed=True)
class Collection(db.Model):
#key_name = "Anthology"
slug = db.StringProperty(required = True)
items = db.ListProperty(int)
@classmethod
def save(cls, collections):
db.put(collections)
@classmethod
def delete_by_key_name(cls, keynames):
keys = [db.Key.from_path(cls.kind(), keyname, parent=None) for keyname in keynames]
db.delete(keys)
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
from __future__ import unicode_literals
import functools
from importlib import import_module
import re
from threading import local
import warnings
from django.http import Http404
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import lazy
from django.utils.http import urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils import six, lru_cache
from django.utils.translation import get_language
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
self.app_name = app_name
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_name=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_name, self.namespaces)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Return a callable corresponding to lookup_view. This function is used
by both resolve() and reverse(), so can_fail allows the caller to choose
between returning the input as is and raising an exception when the input
string can't be interpreted as an import path.
If lookup_view is already a callable, return it.
If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it.
If lookup_view is some other kind of string and can_fail is True, the string
is returned as is. If can_fail is False, an exception is raised (either
ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
if can_fail:
return lookup_view
else:
raise ImportError(
"Could not import '%s'. The path must be fully qualified." %
lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
if can_fail:
return lookup_view
else:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name))
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
if can_fail:
return lookup_view
else:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name))
else:
if not callable(view_func):
# For backwards compatibility this is raised regardless of can_fail
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name))
return view_func
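# Usage sketch (the dotted path is hypothetical): both forms yield a view,
#   get_callable('myapp.views.index')       # imports myapp.views, returns index
#   get_callable(some_view) is some_view    # callables pass straight through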
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
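# A minimal sketch of resolving a single pattern (names are illustrative):
#
#   p = RegexURLPattern(r'^articles/(?P<year>[0-9]{4})/$',
#                       'myapp.views.year_archive', name='archive')
#   match = p.resolve('articles/2005/')
#   # match.kwargs == {'year': '2005'}; match.func is the imported view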
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is a string representing the module containing URLconfs.
self.urlconf_name = urlconf_name
if not isinstance(urlconf_name, six.string_types):
self._urlconf_module = self.urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if hasattr(pattern, '_callback_str'):
self._callback_strs.add(pattern._callback_str)
elif hasattr(pattern, '_callback'):
callback = pattern._callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
lookup_str = callback.__module__ + "." + callback.__class__.__name__
else:
lookup_str = callback.__module__ + "." + callback.__name__
self._callback_strs.add(lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs)))
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def resolve(self, path):
path = force_text(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
return ResolverMatch(sub_match.func, sub_match.args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@property
def urlconf_module(self):
try:
return self._urlconf_module
except AttributeError:
self._urlconf_module = import_module(self.urlconf_name)
return self._urlconf_module
@property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included urlconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = dict((k, force_text(v)) for (k, v) in kwargs.items())
if not self._populated:
self._populate()
original_lookup = lookup_view
try:
if lookup_view in self._callback_strs:
lookup_view = get_callable(lookup_view, True)
except (ImportError, AttributeError) as e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
else:
if not callable(original_lookup) and callable(lookup_view):
warnings.warn(
'Reversing by dotted path is deprecated (%s).' % original_lookup,
RemovedInDjango20Warning, stacklevel=3
)
possibilities = self.reverse_dict.getlist(lookup_view)
prefix_norm, prefix_args = normalize(urlquote(_prefix))[0]
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params) + len(prefix_args):
continue
candidate_subs = dict(zip(prefix_args + params, text_args))
else:
if set(kwargs.keys()) | set(defaults.keys()) != set(params) | set(defaults.keys()) | set(prefix_args):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = prefix_norm.replace('%', '%%') + result
if re.search('^%s%s' % (prefix_norm, pattern), candidate_pat % candidate_subs, re.UNICODE):
candidate_subs = dict((k, urlquote(v)) for (k, v) in candidate_subs.items())
return candidate_pat % candidate_subs
        # lookup_view can be a URL name, a dotted path, or a callable. Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found. %d pattern(s) tried: %s" %
(lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
if prefix is None:
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_app and current_app in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_app
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs))
reverse_lazy = lazy(reverse, str)
def clear_url_caches():
get_callable.cache_clear()
get_resolver.cache_clear()
get_ns_resolver.cache_clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
Returns the currently active script prefix. Useful for client code that
wishes to construct their own URLs manually (although accessing the request
instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
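# Usage sketch tying the module-level helpers together (URL names are
# hypothetical):
#
#   match = resolve('/articles/2005/')        # ResolverMatch instance
#   view, args, kwargs = match                # __getitem__ allows unpacking
#   url = reverse('archive', kwargs={'year': '2005'})
#   ok = is_valid_path('/articles/2005/')     # True/False instead of raising Resolver404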
# MIT License
# Copyright (c) 2016 Diogo Dutra <dutradda@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from copy import deepcopy
import sqlalchemy as sa
from jsonschema import ValidationError, validate
from jsonschema.validators import Draft4Validator, create
from myreco.engine_strategies.filters.filters import BooleanFilterBy
from myreco.item_types._store_items_model_meta import _StoreItemsModelBaseMeta
from myreco.utils import ModuleObjectLoader, build_class_name, build_item_key
from sqlalchemy.ext.declarative import AbstractConcreteBase, declared_attr
from swaggerit.method import SwaggerMethod
from swaggerit.models.orm.factory import FactoryOrmModels
from swaggerit.request import SwaggerRequest
from swaggerit.utils import get_dir_path, get_swagger_json
import ujson
store_items_metaschema = get_swagger_json(__file__, 'store_items_metaschema.json')
ItemValidator = create(store_items_metaschema, Draft4Validator.VALIDATORS)
ItemValidator.DEFAULT_TYPES['simpleObject'] = dict
class _ItemTypesModelBase(AbstractConcreteBase):
__tablename__ = 'item_types'
__swagger_json__ = get_swagger_json(__file__)
__schema_dir__ = get_dir_path(__file__)
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255), unique=True, nullable=False)
schema_json = sa.Column(sa.Text, nullable=False)
store_items_class_json = sa.Column(sa.Text)
@declared_attr
def stores(cls):
return sa.orm.relationship('StoresModel', uselist=True, secondary='item_types_stores')
@property
def store_items_class(self):
if not hasattr(self, '_store_items_class'):
self._store_items_class = \
ujson.loads(self.store_items_class_json) if self.store_items_class_json \
is not None else None
return self._store_items_class
async def _setattr(self, attr_name, value, session, input_):
if attr_name == 'schema':
self._validate_input(value)
value = ujson.dumps(value)
attr_name = 'schema_json'
if attr_name == 'store_items_class':
value = ujson.dumps(value)
attr_name = 'store_items_class_json'
await super()._setattr(attr_name, value, session, input_)
def _validate_input(self, schema):
        validate(schema, store_items_metaschema)
for id_name in schema['id_names']:
if id_name not in schema.get('properties', {}):
raise ValidationError(
"id_name '{}' was not found in schema properties".format(id_name),
instance=schema['id_names'], schema=schema)
def _format_output_json(self, dict_inst, todict_schema):
if todict_schema.get('schema') is not False:
if 'schema_json' in dict_inst:
dict_inst['schema'] = ujson.loads(dict_inst.pop('schema_json'))
schema_properties = dict_inst['schema'].get('properties', {})
schema_properties_names = sorted(schema_properties.keys())
dict_inst['available_filters'] = \
[{'name': name, 'schema': schema_properties[name]} \
for name in schema_properties_names if name != '_operation']
if todict_schema.get('store_items_class') is not False:
dict_inst.pop('store_items_class_json')
dict_inst['store_items_class'] = self.store_items_class
@classmethod
async def swagger_get_filter_types(cls, req, session):
filters_factory = cls.get_model('slot_filters').__factory__
body = ujson.dumps(filters_factory.get_filter_types())
return cls._build_response(200, body=body)
class _StoreItemsOperationsMixin(object):
@classmethod
async def swagger_insert_items(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
resp = await store_items_model.swagger_insert(req, session)
await cls._set_stock_filter(store_items_model, session)
return resp
@classmethod
async def _get_store_items_model(cls, req, session):
id_ = req.path_params['id']
store_id = req.query.pop('store_id')
item_type = await cls._get_item_type(id_, store_id, session)
if item_type is None:
return None
return cls.get_store_items_model(item_type, store_id)
@classmethod
async def _get_item_type(cls, id_, store_id, session):
item_types = await cls.get(session, {'id': id_})
        if not item_types or not cls._has_store(item_types[0], store_id):
return None
return item_types[0]
@classmethod
def _has_store(cls, item_type, store_id):
for store in item_type['stores']:
if store['id'] == store_id:
return True
return False
@classmethod
def get_store_items_model(cls, item_type, store_id):
store_items_model_key = build_item_key('store_items', item_type['name'], store_id)
store_items_model = cls.get_model(store_items_model_key)
if store_items_model is None:
store_items_model = \
cls._set_store_items_model(item_type, store_items_model_key, store_id)
return store_items_model
@classmethod
def _set_store_items_model(cls, item_type, store_items_model_key, store_id):
class_name = build_class_name(item_type['name'], str(store_id))
base_class = cls._get_store_items_class(item_type)
return FactoryOrmModels.make_redis_elsearch(
class_name, item_type['schema']['id_names'],
store_items_model_key, use_elsearch=False,
metaclass=_StoreItemsModelBaseMeta,
base=base_class,
extra_attributes={
'insert_validator': cls._build_insert_validator(item_type),
'update_validator': cls._build_update_validator(item_type),
'atomic_update_validator': ItemValidator(item_type['schema']),
'item_type': item_type
}
)
@classmethod
def _get_store_items_class(cls, item_type):
return ModuleObjectLoader.load({
'path': item_type['store_items_class']['module'],
'object_name': item_type['store_items_class']['class_name']
}) if item_type['store_items_class'] else object
@classmethod
def _build_insert_validator(cls, item_type):
return ItemValidator({
'type': 'array',
'minItems': 1,
'items': item_type['schema']
})
@classmethod
def _build_update_validator(cls, item_type):
schema = deepcopy(item_type['schema'])
properties = schema.get('properties')
if properties:
properties['_operation'] = {'enum': ['delete', 'update']}
return ItemValidator({
'type': 'array',
'minItems': 1,
'items': schema
})
@classmethod
async def swagger_update_items(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
resp = await store_items_model.swagger_update_many(req, session)
await cls._set_stock_filter(store_items_model, session)
return resp
@classmethod
async def swagger_get_item(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
req = cls._cast_request(req)
return await store_items_model.swagger_get(req, session)
@classmethod
def _cast_request(cls, req):
return SwaggerRequest(
req.path, req.method,
scheme=req.scheme,
host=req.host,
path_params=req.path_params['item_key'],
query=req.query,
headers=req.headers,
body=req.body,
body_schema=req.body_schema,
context=req.context
)
@classmethod
async def swagger_update_item(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
req = cls._cast_request(req)
return await store_items_model.swagger_atomic_update(req, session)
@classmethod
async def swagger_get_all_items(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
return await store_items_model.swagger_get_all(req, session)
@classmethod
async def swagger_search_items(cls, req, session):
store_items_model = await cls._get_store_items_model(req, session)
if store_items_model is None:
return cls._build_response(404)
return await store_items_model.swagger_search(req, session)
@classmethod
async def _set_stock_filter(cls, store_items_model, session):
items_indices_map_dict = await store_items_model.indices_map.get_all(session)
items_indices_map_len = await store_items_model.indices_map.get_length(session)
if items_indices_map_dict.values():
items_keys = set(await session.redis_bind.hkeys(store_items_model.__key__))
items_indices_keys = set(items_indices_map_dict.keys())
remaining_keys = items_indices_keys.intersection(items_keys)
old_keys = items_indices_keys.difference(items_keys)
items = []
cls._set_stock_item(
store_items_model, remaining_keys,
items_indices_map_dict, True, items
)
cls._set_stock_item(store_items_model, old_keys, items_indices_map_dict, False, items)
stock_filter = BooleanFilterBy(store_items_model, 'stock')
await stock_filter.update(session, items, items_indices_map_len)
@classmethod
def _set_stock_item(cls, store_items_model, keys, items_indices_map_dict, value, items):
for key in keys:
item = {}
store_items_model.set_instance_ids(item, key)
item.update({'stock': value, 'index': int(items_indices_map_dict[key])})
items.append(item)
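    # For illustration, _set_stock_item() expands each index-map key into a
    # filter record such as {'<id_name>': ..., 'stock': True, 'index': 3};
    # the id fields depend on the item type's schema ('id_names').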
class ItemTypesModelBase(_ItemTypesModelBase, _StoreItemsOperationsMixin):
pass
def build_item_types_stores_table(metadata, **kwargs):
return sa.Table(
'item_types_stores', metadata,
sa.Column('item_type_id', sa.Integer, sa.ForeignKey('item_types.id', ondelete='CASCADE', onupdate='CASCADE'), primary_key=True),
sa.Column('store_id', sa.Integer, sa.ForeignKey('stores.id', ondelete='CASCADE', onupdate='CASCADE'), primary_key=True),
**kwargs)
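# Usage sketch (assuming 'item_types' and 'stores' are declared on the same
# metadata object):
#
#   metadata = sa.MetaData()
#   # ... declare the item_types and stores tables first ...
#   assoc = build_item_types_stores_table(metadata, mysql_engine='InnoDB')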
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from keystone.common.policies import base
# NOTE(lbragstad): Both endpoints and services are system-level resources.
# System-scoped tokens should be required to manage policy associations to
# existing system-level resources.
DEPRECATED_REASON = (
"The policy association API is now aware of system scope and default "
"roles."
)
deprecated_check_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_policy_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'get_policy_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_endpoints_for_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoints_for_policy',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN
)
policy_association_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_policy_association_for_endpoint',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Associate a policy to a specific endpoint.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'PUT'}],
deprecated_rule=deprecated_create_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_endpoint',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='Check policy association for endpoint.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'GET'},
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'HEAD'}],
deprecated_rule=deprecated_check_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy_association_for_endpoint',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete policy association for endpoint.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_policy_association_for_service',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Associate a policy to a specific service.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'PUT'}],
deprecated_rule=deprecated_create_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_service',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='Check policy association for service.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'GET'},
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'HEAD'}],
deprecated_rule=deprecated_check_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy_association_for_service',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete policy association for service.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % (
'create_policy_association_for_region_and_service'),
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description=('Associate a policy to a specific region and service '
'combination.'),
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'PUT'}],
deprecated_rule=deprecated_create_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_region_and_service',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='Check policy association for region and service.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'GET'},
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'HEAD'}],
deprecated_rule=deprecated_check_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % (
'delete_policy_association_for_region_and_service'),
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete policy association for region and service.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_policy_for_endpoint',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='Get policy for endpoint.',
operations=[{'path': ('/v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'
'policy'),
'method': 'GET'},
{'path': ('/v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'
'policy'),
'method': 'HEAD'}],
deprecated_rule=deprecated_get_policy_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoints_for_policy',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='List endpoints for policy.',
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints'),
'method': 'GET'}],
deprecated_rule=deprecated_list_endpoints_for_policy)
]
def list_rules():
return policy_association_policies
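# A minimal usage sketch, assuming oslo.policy and oslo.config are available
# (both are keystone dependencies). It registers the defaults above with an
# Enforcer and checks one rule by name; the credential values below are
# illustrative assumptions, not keystone internals.
if __name__ == '__main__':
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy
    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(list_rules())
    # 'system_scope': 'all' satisfies the scope_types=['system'] declared above.
    print(enforcer.enforce('identity:get_policy_for_endpoint',
                           {}, {'roles': ['reader'], 'system_scope': 'all'}))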
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
from __future__ import print_function
import os, glob, optparse, re, shutil, subprocess, sys, string, time
libexecpath = os.path.abspath(sys.path[0] + '/../libexec')
sys.path.insert(0, libexecpath) # prepend to Python path
from stereo_utils import get_asp_version
import asp_system_utils
asp_system_utils.verify_python_version_is_supported()
job_pool = [];
# Global output folder variable
outputFolder = ""
def man(option, opt, value, parser):
print(parser.usage, file=sys.stderr)
print('''\
This program operates on LRO (.IMG) files, and performs the
following ISIS 3 operations:
* Converts to ISIS format (lronac2isis)
* Attaches SPICE information (spiceinit and spicefit)
* Performs radiometric calibration (lronaccal)
 * Corrects the echo effect (lronacecho)
* Removes camera distortions from the CCD images (noproj)
* Performs jitter analysis (lrojitreg)
* Mosaics individual CCDs into one unified image file (handmos)
* Normalizes the mosaic (cubenorm)
''', file=sys.stderr)
sys.exit()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def add_job( cmd, num_working_threads=4 ):
if ( len(job_pool) >= num_working_threads):
job_pool[0].wait();
job_pool.pop(0);
print(cmd)
job_pool.append( subprocess.Popen(cmd, shell=True, env=os.environ) );
def wait_on_all_jobs():
print("Waiting for jobs to finish")
while len(job_pool) > 0:
job_pool[0].wait();
job_pool.pop(0);
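# A minimal sketch of the throttling behavior above (the echo commands are
# placeholder examples, not part of the pipeline):
#     for n in range(8):
#         add_job('echo job %d' % n, num_working_threads=2)  # at most 2 run at once
#     wait_on_all_jobs()  # blocks until every queued subprocess has exited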
# Go through a list of cubes and sort them into left/right pairs
def build_cube_pairs(cubePaths):
pairDict = dict();
for cube in cubePaths:
print(cube)
        m = re.search(r'\D*(\d+)(.).*', os.path.basename(cube))
number = m.group(1)
sideLetter = m.group(2)
if (number not in pairDict):
pairDict[number] = ['', ''];
# Store the path in the spot for either the left or right cube
if (sideLetter == "L"):
pairDict[number][0] = cube; # Left
else:
pairDict[number][1] = cube; # Right
return pairDict
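# Illustration of the pairing convention assumed above (paths hypothetical):
#     build_cube_pairs(['M1127782730LE.cub', 'M1127782730RE.cub'])
#     # -> {'1127782730': ['M1127782730LE.cub', 'M1127782730RE.cub']}
# The regex keys on the digit run and the single letter after it:
# 'L' fills the left slot (index 0), anything else the right slot (index 1).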
def read_flatfile( flat ):
# Fail if the input file is not present
if not os.path.isfile(flat):
raise Exception('File ' + flat + ' is missing!')
averages = [0.0,0.0]
f = open(flat,'r')
for line in f:
if ( line.rfind("Average Sample Offset:") >= 0 ):
index = line.rfind("Offset:");
index_e = line.rfind("StdDev:");
crop = line[index+7:index_e];
if crop == " NULL ": # Check for null value
raise Exception('Null sample offset in file ' + flat)
averages[0] = float(crop);
elif ( line.rfind("Average Line Offset:") >= 0 ):
index = line.rfind("Offset:");
index_e = line.rfind("StdDev:");
crop = line[index+7:index_e];
if crop == " NULL ": # Check for null value
                raise Exception('Null line offset in file ' + flat)
averages[1] = float(crop);
elif ( line.rfind("Using IpFind result only:") >= 0 ):
index = line.rfind("only:");
            if (line[index + 7] == '1'):
                print("Warning: This result is based only on the IpFind search.")
print(str(averages))
return averages
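# Example of the lrojitreg log lines parsed above (values illustrative):
#     Average Sample Offset: 12.3  StdDev: 0.4
#     Average Line Offset:   -7.8  StdDev: 0.5
# read_flatfile() would return [12.3, -7.8] for such a file.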
# Call lronac2isis on each input file, return list of output files.
def lronac2isis( img_files, threads, outputFolder ):
lronac2isis_cubs = []
for img in img_files:
# Expect to end in .IMG, change to end in .cub and move to output folder
newExtension = os.path.splitext(img)[0] + '.cub'
cubFilePath = os.path.join(outputFolder, os.path.basename(newExtension))
if( os.path.exists(cubFilePath) ):
print(cubFilePath + ' exists, skipping lronac2isis.')
else:
cmd = 'lronac2isis from='+ img +' to='+ cubFilePath
add_job(cmd, threads)
lronac2isis_cubs.append( cubFilePath )
wait_on_all_jobs()
return lronac2isis_cubs
# Call lronaccal on each input file, return list of output files.
def lronaccal( cub_files, threads, delete=False ):
lronaccal_cubs = []
for cub in cub_files:
# Expect to end in .cub, change to end in .lronaccal.cub
to_cub = os.path.splitext(cub)[0] + '.lronaccal.cub'
if( os.path.exists(to_cub) ):
print(to_cub + ' exists, skipping lronaccal.')
else:
cmd = 'lronaccal from='+ cub +' to='+ to_cub
add_job(cmd, threads)
lronaccal_cubs.append( to_cub )
wait_on_all_jobs()
if( delete ): # Delete all input .cub files and log files
for cub in cub_files:
os.remove( cub )
lronaccal_log_files = glob.glob( os.path.commonprefix(cub_files) + '*.lronaccal.log' )
for file in lronaccal_log_files:
os.remove( file )
return lronaccal_cubs
# Call lronacecho on each input file, return list of output files.
def lronacecho( cub_files, threads, delete=False ):
lronacecho_cubs = []
for cub in cub_files:
        # Expect to end in .cub, change to end in .lronacecho.cub
to_cub = os.path.splitext(cub)[0] + '.lronacecho.cub'
if( os.path.exists(to_cub) ):
print(to_cub + ' exists, skipping lronacecho.')
else:
cmd = 'lronacecho from='+ cub +' to='+ to_cub
add_job(cmd, threads)
lronacecho_cubs.append( to_cub )
wait_on_all_jobs()
if( delete ): # Delete all input .cub files and log files
for cub in cub_files:
os.remove( cub )
return lronacecho_cubs
def spice( cub_files, threads):
for cub in cub_files:
cmd = 'spiceinit web=false from='+ cub
add_job(cmd, threads)
wait_on_all_jobs()
for cub in cub_files:
cmd = 'spicefit from='+ cub
add_job(cmd, threads)
wait_on_all_jobs()
return
# Returns true if the .cub LRONAC file has CROSSTRACK_SUMMING = 1
def isFileHalfRes(cubFilePath):
    return False; # The normal PVL file appears to work in all cases, so the check below is intentionally skipped
f = open(cubFilePath, 'r')
for line in f:
if ( line.rfind("CROSSTRACK_SUMMING") >= 0 ):
index = line.rfind("=");
crop = line[index+2];
result = (crop == "2")
f.close()
return result;
# Left file is in index 0, right is in index 1
def noproj( file_pairs, threads, delete, fakePvl, outputFolder):
if fakePvl: # Generate temporary PVL files containing LRONAC definition
# - We need one for full-res mode, one for half-X-res mode.
fullResFilePath = os.path.join(outputFolder, 'noprojInstruments_fullRes.pvl')
if os.path.exists(fullResFilePath):
print(fullResFilePath + ' exists, using existing file.')
else: # Need to write the file
print('Generating LRONAC compatible .pvl file ' + fullResFilePath)
f = open(fullResFilePath, 'w')
f.write('Object = IdealInstrumentsSpecifications\n');
f.write(' UserName = auto\n');
f.write(' Created = 2013-07-18T13:42:00\n');
f.write(' LastModified = 2013-07-18T13:42:00\n\n');
f.write(' Group = "LUNAR RECONNAISSANCE ORBITER/NACL"\n');
f.write(' TransY = 16.8833\n')
f.write(' ItransS = -2411.9\n')
f.write(' TransX = 0.6475\n')
f.write(' ItransL = -92.5\n')
f.write(' DetectorSamples = 10000\n')
f.write(' End_Group\n\n')
f.write('End_Object\n')
f.write('End')
f.close()
halfResFilePath = os.path.join(outputFolder, 'noprojInstruments_halfRes.pvl')
if os.path.exists(halfResFilePath):
print(halfResFilePath + ' exists, using existing file.')
else: # Need to write the file
print('Generating LRONAC compatible .pvl file ' + halfResFilePath)
f = open(halfResFilePath, 'w')
f.write('Object = IdealInstrumentsSpecifications\n');
f.write(' UserName = auto\n');
f.write(' Created = 2013-07-18T13:42:00\n');
f.write(' LastModified = 2013-07-18T13:42:00\n\n');
f.write(' Group = "LUNAR RECONNAISSANCE ORBITER/NACL"\n');
f.write(' TransY = 16.8833\n')
f.write(' ItransS = -4823.8\n') # Halved
f.write(' TransX = 0.6475\n')
f.write(' ItransL = -185\n') # Halved
f.write(' DetectorSamples = 5000\n') # Halved
f.write(' End_Group\n\n')
f.write('End_Object\n')
f.write('End')
f.close()
noproj_pairs = dict();
for k, v in file_pairs.items():
noproj_pairs[k] = ['', ''];
for i in range(2): # Process left and right image
to_cub = os.path.splitext(v[i])[0] + '.noproj.cub'
noproj_pairs[k][i] = to_cub; # Add file to output list
if os.path.exists( to_cub ):
print(to_cub + ' exists, skipping noproj.')
else:
# Generate pvl command if needed
if fakePvl:
fileIsHalfRes = isFileHalfRes(v[0])
if fileIsHalfRes:
specsLine = ' specs=' + os.path.abspath(halfResFilePath) + ' ';
else: # Full resolution
specsLine = ' specs=' + os.path.abspath(fullResFilePath) + ' ';
else: # Use the default file
specsLine = '';
# Multiple noproj threads will create clashing temporary files
# so we need to make temporary directories to run each thread in.
tempDir = 'temp_' + str(k) + '_' + str(i)
tempDir = os.path.join(outputFolder, tempDir)
cmd = 'mkdir -p ' + tempDir + ' && ' \
+ 'cd ' + tempDir + ' && ' \
+ 'noproj from=' + os.path.abspath(v[i]) \
+ ' match=' + os.path.abspath(v[0]) \
+ specsLine \
+ ' to=' + os.path.abspath(to_cub) + ' && ' \
+ 'cd .. && rm -rf ' + tempDir
add_job(cmd, threads)
wait_on_all_jobs()
if( delete ): # Clean up input cube files
for v in file_pairs.values():
os.remove( v[0] );
os.remove( v[1] );
# if fakePvl: # These are not deleted in case this program is running in multiple threads
# os.remove( halfResFilePath );
# os.remove( fullResFilePath );
return noproj_pairs;
def lronacjitreg( noproj_pairs, threads, delete=False ):
boundsCommands = '--correlator-type 2 --kernel 15 15'
for k,v in noproj_pairs.items():
cmd = 'lronacjitreg ' + boundsCommands \
+ ' --output-log outputLog_'+str(k)+'.txt' \
+ ' '+ v[0] \
+ ' '+ v[1];
add_job(cmd, threads)
wait_on_all_jobs()
# Read in all the shift values from the output text files
averages = dict()
for k,v in noproj_pairs.items():
flat_file = 'outputLog_'+str(k)+'.txt'
print('Reading log file ' + flat_file)
averages[k] = read_flatfile( flat_file )
if delete:
os.remove( flat_file )
return averages
def mosaic( noproj_pairs, averages, threads ):
mosaicList = dict();
for k,v in noproj_pairs.items():
# Create mosaic output file
mosaicPath = os.path.splitext(v[0])[0] + '.mosaic.cub'
shutil.copy( v[0], mosaicPath ) # Copy the LE image to the output path
xOffset = -1*averages[k][0] # Sign convention changes here
yOffset = -1*averages[k][1]
handmos( v[1], mosaicPath,
str( int(round( xOffset )) ),
str( int(round( yOffset )) ),
threads )
mosaicList[k] = mosaicPath;
wait_on_all_jobs()
return mosaicList
def handmos( fromcub, tocub, outsamp, outline, threads ):
    # ISIS expects each parameter as a single param=value token, so no
    # spaces are placed around the '=' signs.
    cmd = 'handmos from='+ fromcub +' mosaic='+ tocub \
          +' outsample='+ str(outsamp) \
          +' outline='+ str(outline) \
          +' matchbandbin=FALSE priority=ontop';
add_job(cmd, threads);
return
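# For reference, the command assembled above expands to something like
# (paths and offsets illustrative):
#     handmos from=right.noproj.cub mosaic=left.mosaic.cub outsample=12 \
#         outline=-8 matchbandbin=FALSE priority=ontop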
def cubenorm( mosaicList, threads, delete=False ):
normedList = dict();
for k,v in mosaicList.items():
normedPath = os.path.splitext(v)[0] + '.norm.cub'
cmd = 'cubenorm from='+ v +' to='+ normedPath
add_job(cmd, threads);
normedList[k] = normedPath;
wait_on_all_jobs()
if( delete ): # Clean up input cube files
for v in mosaicList.values():
os.remove(v);
return normedList
def cropInputs(inputFiles, outputFolder, cropAmount, threads, delete=False):
outputPaths = []
for path in inputFiles:
# Expect to end in .IMG, change to end in .cub and move to output folder
newExtension = os.path.splitext(path)[0] + '.cropped.cub'
croppedPath = os.path.join(outputFolder, os.path.basename(newExtension))
cmd = 'crop from='+ path +' to='+ croppedPath + ' nlines=' + str(cropAmount)
add_job(cmd, threads)
outputPaths.append( croppedPath )
wait_on_all_jobs()
if delete:
for path in inputFiles:
os.remove(path)
return outputPaths
#--------------------------------------------------------------------------------
#TODO: Support for file based logging of results
def main():
try:
try:
usage = "usage: lronac2mosaic.py [--help][--manual][--crop][--threads N]" \
"[--keep] LRONAC.IMG-files\n " + get_asp_version()
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(delete =True)
parser.set_defaults(cropAmount=0)
parser.set_defaults(threads=4)
parser.set_defaults(fakePvl=True)
parser.add_option("--manual", action="callback", callback=man,
help="Read the manual.")
parser.add_option("-o", "--output-dir", dest="outputFolder",
help="Output folder (default to input folder).",type="string")
parser.add_option("--stop-at-no-proj", dest="stop_no_proj", action="store_true",
help="Process the IMG files only to have SPICE attached.")
parser.add_option("--resume-at-no-proj", dest="resume_no_proj", action="store_true",
help="Pick back up after spiceinit has happened. This was noproj uses your new camera information")
parser.add_option("-c", "--crop", dest="cropAmount",
help="Process only the first N lines of the image.",type="int")
parser.add_option("-t", "--threads", dest="threads",
help="Number of threads to use.",type="int")
parser.add_option("-k", "--keep", action="store_false",
dest="delete",
help="Will not delete intermediate files.")
parser.add_option("--p", dest="fakePvl", action="store_true",
help="Don't automatically create a LRONAC pvl file")
(options, args) = parser.parse_args()
if not args: parser.error("need .IMG files")
except optparse.OptionError as msg:
raise Usage(msg)
# Make sure only one pair of cubes was passed in
input_file_pair = build_cube_pairs(args)
if len(input_file_pair) > 1:
        raise Usage('Input error: Only one pair of input files is allowed, ' +
                    'with names like M1127782730LE.IMG and M1127782730RE.IMG.')
if not options.outputFolder: # Set the output folder equal to the input folder
options.outputFolder = os.path.dirname(args[0])
print('Using output folder: ' + options.outputFolder)
if not os.path.exists(options.outputFolder) and len(options.outputFolder) > 1:
os.makedirs(options.outputFolder)
print("Beginning processing.....")
if not options.resume_no_proj: # If not skipping to later point
print("lronac2isis") # Per-file operation, returns list of new files
lronac2isised = lronac2isis( args, options.threads, options.outputFolder )
print("lronaccal") # Per-file operation, returns list of new files
lronaccaled = lronaccal( lronac2isised, options.threads, options.delete )
print("lronacecho") # Per-file operation, returns list of new files
lronacechod = lronacecho( lronaccaled, options.threads, options.delete )
if (options.cropAmount > 0): # Crop the input files as soon as ISIS calls allow it
lronacechod = cropInputs(lronacechod, options.outputFolder, options.cropAmount,
options.threads, options.delete)
print("spice") # Attach spice info to cubes (adds to existing files)
spice( lronacechod, options.threads )
if options.stop_no_proj: # Stop early if requested
print("Finished")
return 0
if options.resume_no_proj: # If resume option was set
lronacechod = args
print("build_cube_pairs") # Detected corresponding pairs of cubes
lronac_file_pairs = build_cube_pairs(lronacechod)
print("noproj") # Per-file operation
noprojed_file_pairs = noproj( lronac_file_pairs, options.threads, options.delete, options.fakePvl, options.outputFolder)
print("lronacjitreg") # Determines mean shift for each file pair
averages = lronacjitreg( noprojed_file_pairs, options.threads, options.delete )
print("mosaic") # handmos - Use mean shifts to combine the file pairs
mosaicked = mosaic( noprojed_file_pairs, averages, options.threads )
# Clean up noproj files
if( options.delete ):
for cub in noprojed_file_pairs.values():
os.remove( cub[0] )
os.remove( cub[1] )
# Run a final cubenorm across the image:
cubenorm( mosaicked, options.threads, options.delete )
print("Finished")
return 0
except Usage as err:
print(err.msg, file=sys.stderr)
return 2
# To more easily debug this program, comment out this catch block.
# except Exception as err:
# sys.stderr.write( str(err) + '\n' )
# return 1
if __name__ == "__main__":
sys.exit(main())
| |
from boto.swf.exceptions import SWFResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_swf_deprecated
from moto.swf import swf_backend
from ..utils import setup_workflow
# PollForDecisionTask endpoint
@mock_swf_deprecated
def test_poll_for_decision_task_when_one():
conn = setup_workflow()
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"])
resp = conn.poll_for_decision_task("test-domain", "queue", identity="srv01")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"]
)
resp["events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal(
"srv01"
)
@mock_swf_deprecated
def test_poll_for_decision_task_previous_started_event_id():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
assert resp["workflowExecution"]["runId"] == conn.run_id
assert "previousStartedEventId" not in resp
    # Require a failing decision, in this case a non-existent activity type
attrs = {
"activityId": "spam",
"activityType": {"name": "test-activity", "version": "v1.42"},
"taskList": "eggs",
}
decision = {
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": attrs,
}
conn.respond_decision_task_completed(resp["taskToken"], decisions=[decision])
resp = conn.poll_for_decision_task("test-domain", "queue")
assert resp["workflowExecution"]["runId"] == conn.run_id
assert resp["previousStartedEventId"] == 3
@mock_swf_deprecated
def test_poll_for_decision_task_when_none():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
resp = conn.poll_for_decision_task("test-domain", "queue")
# this is the DecisionTask representation you get from the real SWF
# after waiting 60s when there's no decision to be taken
resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_decision_task_on_non_existent_queue():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "non-existent-queue")
resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_decision_task_with_reverse_order():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue", reverse_order=True)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]
)
# CountPendingDecisionTasks endpoint
@mock_swf_deprecated
def test_count_pending_decision_tasks():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
resp = conn.count_pending_decision_tasks("test-domain", "queue")
resp.should.equal({"count": 1, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
conn = setup_workflow()
resp = conn.count_pending_decision_tasks("test-domain", "non-existent")
resp.should.equal({"count": 0, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_after_decision_completes():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
conn.respond_decision_task_completed(resp["taskToken"])
resp = conn.count_pending_decision_tasks("test-domain", "queue")
resp.should.equal({"count": 0, "truncated": False})
# RespondDecisionTaskCompleted endpoint
@mock_swf_deprecated
def test_respond_decision_task_completed_with_no_decision():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
resp = conn.respond_decision_task_completed(
task_token, execution_context="free-form context"
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
[
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
]
)
evt = resp["events"][-1]
evt["decisionTaskCompletedEventAttributes"].should.equal(
{
"executionContext": "free-form context",
"scheduledEventId": 2,
"startedEventId": 3,
}
)
resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234")
resp["latestExecutionContext"].should.equal("free-form context")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_wrong_token():
conn = setup_workflow()
conn.poll_for_decision_task("test-domain", "queue")
conn.respond_decision_task_completed.when.called_with(
"not-a-correct-token"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_respond_decision_task_completed_on_close_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
# bad: we're closing workflow execution manually, but endpoints are not
# coded for now..
wfe = swf_backend.domains[0].workflow_executions[-1]
wfe.execution_status = "CLOSED"
# /bad
conn.respond_decision_task_completed.when.called_with(task_token).should.throw(
SWFResponseError
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_task_already_completed():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
conn.respond_decision_task_completed(task_token)
conn.respond_decision_task_completed.when.called_with(task_token).should.throw(
SWFResponseError
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_complete_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{
"decisionType": "CompleteWorkflowExecution",
"completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"},
}
]
resp = conn.respond_decision_task_completed(task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
[
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"WorkflowExecutionCompleted",
]
)
resp["events"][-1]["workflowExecutionCompletedEventAttributes"][
"result"
].should.equal("foo bar")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_close_decision_not_last():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{"decisionType": "CompleteWorkflowExecution"},
{"decisionType": "WeDontCare"},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(SWFResponseError, r"Close must be last decision in list")
@mock_swf_deprecated
def test_respond_decision_task_completed_with_invalid_decision_type():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{"decisionType": "BadDecisionType"},
{"decisionType": "CompleteWorkflowExecution"},
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(
SWFResponseError,
r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'",
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_missing_attributes():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{
"decisionType": "should trigger even with incorrect decision type",
"startTimerDecisionAttributes": {},
}
]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(
SWFResponseError,
r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' "
r"failed to satisfy constraint: Member must not be null",
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_missing_attributes_totally():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [{"decisionType": "StartTimer"}]
conn.respond_decision_task_completed.when.called_with(
task_token, decisions=decisions
).should.throw(
SWFResponseError,
r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' "
r"failed to satisfy constraint: Member must not be null",
)
@mock_swf_deprecated
def test_respond_decision_task_completed_with_fail_workflow_execution():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{
"decisionType": "FailWorkflowExecution",
"failWorkflowExecutionDecisionAttributes": {
"reason": "my rules",
"details": "foo",
},
}
]
resp = conn.respond_decision_task_completed(task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
[
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"WorkflowExecutionFailed",
]
)
attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"]
attrs["reason"].should.equal("my rules")
attrs["details"].should.equal("foo")
@mock_swf_deprecated
@freeze_time("2015-01-01 12:00:00")
def test_respond_decision_task_completed_with_schedule_activity_task():
conn = setup_workflow()
resp = conn.poll_for_decision_task("test-domain", "queue")
task_token = resp["taskToken"]
decisions = [
{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "my-activity-001",
"activityType": {"name": "test-activity", "version": "v1.1"},
"heartbeatTimeout": "60",
"input": "123",
"taskList": {"name": "my-task-list"},
},
}
]
resp = conn.respond_decision_task_completed(task_token, decisions=decisions)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(
[
"WorkflowExecutionStarted",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"DecisionTaskCompleted",
"ActivityTaskScheduled",
]
)
resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal(
{
"decisionTaskCompletedEventId": 4,
"activityId": "my-activity-001",
"activityType": {"name": "test-activity", "version": "v1.1"},
"heartbeatTimeout": "60",
"input": "123",
"taskList": {"name": "my-task-list"},
}
)
resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234")
resp["latestActivityTaskTimestamp"].should.equal(1420113600.0)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from uuid import uuid4
from datetime import datetime
import hashlib
import logging
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from sqlalchemy import or_
from ujson import dumps
from tornado.concurrent import return_future
from holmes.models import Base
from holmes.utils import get_domain_from_url
class Page(Base):
__tablename__ = "pages"
id = sa.Column(sa.Integer, primary_key=True)
url = sa.Column('url', sa.String(2000), nullable=False)
url_hash = sa.Column('url_hash', sa.String(128), nullable=False)
uuid = sa.Column('uuid', sa.String(36), default=uuid4, nullable=False)
created_date = sa.Column('created_date', sa.DateTime, default=datetime.utcnow, nullable=False)
domain_id = sa.Column('domain_id', sa.Integer, sa.ForeignKey('domains.id'))
reviews = relationship("Review", backref="page", foreign_keys='[Review.page_id]')
last_review_id = sa.Column('last_review_id', sa.Integer, sa.ForeignKey('reviews.id'))
last_review = relationship("Review", foreign_keys=[last_review_id])
last_review_date = sa.Column('last_review_date', sa.DateTime, nullable=True)
last_review_uuid = sa.Column('last_review_uuid', sa.String(36), nullable=True)
last_modified = sa.Column('last_modified', sa.DateTime, nullable=True)
expires = sa.Column('expires', sa.DateTime, nullable=True)
violations_count = sa.Column('violations_count', sa.Integer, server_default='0', nullable=False)
score = sa.Column('score', sa.Float, default=0.0, nullable=False)
def to_dict(self):
return {
'uuid': str(self.uuid),
'url': self.url,
'lastModified': self.last_modified,
'expires': self.expires,
'score': self.score
}
def __str__(self):
return str(self.uuid)
def __repr__(self):
return str(self)
def get_violations_per_day(self, db):
from holmes.models import Review, Violation # Prevent circular dependency
violations = db \
.query(
sa.func.year(Review.completed_date).label('year'),
sa.func.month(Review.completed_date).label('month'),
sa.func.day(Review.completed_date).label('day'),
sa.func.count(Violation.id).label('violation_count'),
sa.func.sum(Violation.points).label('violation_points')
).join(
Page, Page.id == Review.page_id
).join(
Violation, Violation.review_id == Review.id
).filter(Review.is_complete == True).filter(Review.page_id == self.id) \
.group_by(
sa.func.year(Review.completed_date),
sa.func.month(Review.completed_date),
sa.func.day(Review.completed_date),
) \
.order_by(Review.completed_date) \
.all()
result = []
for day in violations:
dt = "%d-%d-%d" % (day.year, day.month, day.day)
result.append({
"completedAt": dt,
"violation_count": int(day.violation_count),
"violation_points": int(day.violation_points)
})
return result
@classmethod
def by_uuid(cls, uuid, db):
return db.query(Page).filter(Page.uuid == uuid).first()
@classmethod
def by_url_hash(cls, url_hash, db):
return db.query(Page).filter(Page.url_hash == url_hash).first()
@classmethod
def get_page_count(cls, db):
return int(db.query(sa.func.count(Page.id)).scalar())
@classmethod
@return_future
def add_page(cls, db, cache, url, score, fetch_method, publish_method,
config, girl, default_violations_values, violation_definitions, callback):
domain_name, domain_url = get_domain_from_url(url)
if not url or not domain_name:
callback((False, url, {
'reason': 'invalid_url',
'url': url,
'status': None,
'details': 'Domain name could not be determined.'
}))
return
logging.debug('Obtaining "%s"...' % url)
fetch_method(
url,
cls.handle_request(cls.handle_add_page(
db, cache, url, score, publish_method, config, girl,
default_violations_values, violation_definitions, callback
)),
proxy_host=config.HTTP_PROXY_HOST,
proxy_port=config.HTTP_PROXY_PORT,
user_agent=config.HOLMES_USER_AGENT,
)
@classmethod
def handle_request(cls, callback):
def handle(*args, **kw):
response = args[-1] # supports (url, response) and just response
if hasattr(response, 'status_code'):
status_code = response.status_code
elif hasattr(response, 'code'):
status_code = response.code
else:
status_code = 400
if hasattr(response, 'body'):
text = response.body
elif hasattr(response, 'text'):
text = response.text
else:
text = 'Empty response.text'
callback(status_code, text, response.effective_url)
return handle
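    # A minimal sketch of the duck-typing handle() relies on, using a
    # stand-in response object (FakeResponse is an assumption for
    # illustration, not a real fetcher type):
    #     class FakeResponse(object):
    #         code = 200
    #         body = '<html></html>'
    #         effective_url = 'http://example.com/'
    #     results = []
    #     handler = Page.handle_request(lambda *args: results.append(args))
    #     handler(FakeResponse())
    #     # results == [(200, '<html></html>', 'http://example.com/')]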
@classmethod
def handle_add_page(cls, db, cache, url, score, publish_method, config,
girl, default_violations_values, violation_definitions, callback):
def handle(code, body, effective_url):
if code > 399:
callback((False, url, {
'reason': 'invalid_url',
'url': url,
'status': code,
'details': body
}))
return
if effective_url != url:
callback((False, url, {
'reason': 'redirect',
'url': url,
'effectiveUrl': effective_url
}))
return
domain = cls.add_domain(
url, db, publish_method, config, girl,
default_violations_values, violation_definitions, cache
)
page_uuid = cls.insert_or_update_page(
url, score, domain, db, publish_method, cache, config
)
callback((True, url, page_uuid))
return handle
@classmethod
def insert_or_update_page(cls, url, score, domain, db, publish_method, cache, config):
url = url.encode('utf-8')
url_hash = hashlib.sha512(url).hexdigest()
page = Page.by_url_hash(url_hash, db)
if page:
cache.increment_page_score(page.url)
return page.uuid
try:
page_uuid = uuid4()
query_params = {
'url': url,
'url_hash': url_hash,
'uuid': page_uuid,
'domain_id': domain.id,
'created_date': datetime.utcnow(),
'score': score
}
db.execute(
'INSERT INTO pages (url, url_hash, uuid, domain_id, created_date, score) '
'VALUES (:url, :url_hash, :uuid, :domain_id, :created_date, :score) ON DUPLICATE KEY '
'UPDATE score = :score',
query_params
)
except Exception:
err = sys.exc_info()[1]
if 'Duplicate entry' in str(err):
logging.debug('Duplicate entry! (Details: %s)' % str(err))
else:
raise
publish_method(dumps({
'type': 'new-page',
'pageUrl': str(url)
}))
return page_uuid
@classmethod
def add_domain(cls, url, db, publish_method, config, girl,
default_violations_values, violation_definitions, cache):
from holmes.models import Domain, DomainsViolationsPrefs
from holmes.material import expire_materials
domain_name, domain_url = get_domain_from_url(url)
domains = db.query(Domain).filter(or_(
Domain.name == domain_name,
Domain.name == domain_name.rstrip('/'),
Domain.name == "%s/" % domain_name
)).all()
if not domains:
domain = None
else:
domain = domains[0]
if not domain:
url_hash = hashlib.sha512(domain_url).hexdigest()
domain = Domain(url=domain_url, url_hash=url_hash, name=domain_name)
db.add(domain)
db.flush()
expire_materials(girl)
publish_method(dumps({
'type': 'new-domain',
'domainUrl': str(domain_url)
}))
keys = default_violations_values.keys()
DomainsViolationsPrefs.insert_default_violations_values_for_domain(
db, domain, keys, violation_definitions, cache
)
from holmes.models import Limiter
connections = config.DEFAULT_NUMBER_OF_CONCURRENT_CONNECTIONS
Limiter.add_or_update_limiter(db, domain_url, connections)
return domain
| |
import fnmatch
import glob
import io
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
import django
from django.core.management.base import CommandError, NoArgsCommand
from django.core.management.utils import (handle_extensions, find_command,
popen_wrapper)
from django.utils.encoding import force_str
from django.utils.functional import total_ordering
from django.utils.text import get_text_list
from django.utils.jslex import prepare_js_for_gettext
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
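# Example of the PO header entry the regex above captures (illustrative):
#     "Plural-Forms: nplurals=2; plural=(n != 1);\n"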
STATUS_OK = 0
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError("Can't find %s. Make sure you have GNU "
"gettext tools 0.15 or newer installed." % program)
@total_ordering
class TranslatableFile(object):
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
def process(self, command, domain):
"""
Extract translatable literals from self.file for :param domain:,
creating or updating the POT file.
Uses the xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if command.verbosity > 1:
command.stdout.write('processing file %s in %s\n' % (self.file, self.dirpath))
_, file_ext = os.path.splitext(self.file)
if domain == 'djangojs' and file_ext in command.extensions:
is_templatized = True
orig_file = os.path.join(self.dirpath, self.file)
with open(orig_file) as fp:
src_data = fp.read()
src_data = prepare_js_for_gettext(src_data)
thefile = '%s.c' % self.file
work_file = os.path.join(self.dirpath, thefile)
with open(work_file, "w") as fp:
fp.write(src_data)
args = [
'xgettext',
'-d', domain,
'--language=C',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--from-code=UTF-8',
'--add-comments=Translators',
'--output=-'
]
if command.wrap:
args.append(command.wrap)
if command.location:
args.append(command.location)
args.append(work_file)
elif domain == 'django' and (file_ext == '.py' or file_ext in command.extensions):
thefile = self.file
orig_file = os.path.join(self.dirpath, self.file)
is_templatized = file_ext in command.extensions
if is_templatized:
with open(orig_file, "rU") as fp:
src_data = fp.read()
thefile = '%s.py' % self.file
content = templatize(src_data, orig_file[2:])
with open(os.path.join(self.dirpath, thefile), "w") as fp:
fp.write(content)
work_file = os.path.join(self.dirpath, thefile)
args = [
'xgettext',
'-d', domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=ugettext_noop',
'--keyword=ugettext_lazy',
'--keyword=ungettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--from-code=UTF-8',
'--add-comments=Translators',
'--output=-'
]
if command.wrap:
args.append(command.wrap)
if command.location:
args.append(command.location)
args.append(work_file)
else:
return
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
if is_templatized:
os.unlink(work_file)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(self.file, errors))
elif command.verbosity > 0:
# Print warnings
command.stdout.write(errors)
if msgs:
# Write/append messages to pot file
potfile = os.path.join(self.locale_dir, '%s.pot' % str(domain))
if is_templatized:
# Remove '.py' suffix
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old = '#: ' + work_file
new = '#: ' + orig_file
else:
old = '#: ' + work_file[2:]
new = '#: ' + orig_file[2:]
msgs = msgs.replace(old, new)
write_pot_file(potfile, msgs)
if is_templatized:
os.unlink(work_file)
def write_pot_file(potfile, msgs):
"""
Write the :param potfile: POT file with the :param msgs: contents,
previously making sure its format is valid.
"""
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with open(potfile, 'a') as fp:
fp.write(msgs)
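# Behavior note for write_pot_file(): when the potfile does not exist yet, the
# 'charset=CHARSET' placeholder is replaced with UTF-8; on later calls the
# leading header block (the run of non-empty lines at the top of msgs) is
# dropped via dropwhile(len, ...) so the appended chunk does not duplicate it.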
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt", or "js" if the domain is "djangojs"). Separate multiple extensions with commas, or use -e multiple times.',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines."),
make_option('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines."),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings."),
make_option('--keep-pot', action='store_true', dest='keep_pot',
default=False, help="Keep .pot file after making messages. Useful when debugging."),
)
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
leave_locale_alone = True
def handle_noargs(self, *args, **options):
locale = options.get('locale')
self.domain = options.get('domain')
self.verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
self.symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
self.wrap = '--no-wrap' if options.get('no_wrap') else ''
self.location = '--no-location' if options.get('no_location') else ''
self.no_obsolete = options.get('no_obsolete')
self.keep_pot = options.get('keep_pot')
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt']
self.extensions = handle_extensions(exts)
if (locale is None and not process_all) or self.domain is None:
raise CommandError("Type '%s help %s' for usage information." % (
os.path.basename(sys.argv[0]), sys.argv[1]))
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N=True)
if self.verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(self.extensions), 'and'))
self.invoked_for_django = False
self.locale_paths = []
self.default_locale_path = None
if os.path.isdir(os.path.join('conf', 'locale')):
self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
self.default_locale_path = self.locale_paths[0]
self.invoked_for_django = True
# Ignoring all contrib apps
self.ignore_patterns += ['contrib/*']
else:
self.locale_paths.extend(list(settings.LOCALE_PATHS))
        # Allow running makemessages inside an app dir
if os.path.isdir('locale'):
self.locale_paths.append(os.path.abspath('locale'))
if self.locale_paths:
self.default_locale_path = self.locale_paths[0]
if not os.path.exists(self.default_locale_path):
os.makedirs(self.default_locale_path)
# Build locale list
locales = []
if locale is not None:
locales = locale
elif process_all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
locales = [os.path.basename(l) for l in locale_dirs]
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
check_programs('xgettext')
try:
potfiles = self.build_potfiles()
# Build po files for each selected locale
for locale in locales:
if self.verbosity > 0:
self.stdout.write("processing locale %s\n" % locale)
for potfile in potfiles:
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot:
self.remove_potfiles()
def build_potfiles(self):
"""
Build pot files and apply msguniq to them.
"""
file_list = self.find_files(".")
self.remove_potfiles()
for f in file_list:
try:
f.process(self, self.domain)
except UnicodeDecodeError:
self.stdout.write("UnicodeDecodeError: skipped file %s in %s" % (f.file, f.dirpath))
potfiles = []
for path in self.locale_paths:
potfile = os.path.join(path, '%s.pot' % str(self.domain))
if not os.path.exists(potfile):
continue
args = ['msguniq', '--to-code=utf-8']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.append(potfile)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
with open(potfile, 'w') as fp:
fp.write(msgs)
potfiles.append(potfile)
return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % str(self.domain))
if os.path.exists(pot_path):
os.unlink(pot_path)
def find_files(self, root):
"""
Helper method to get all files in the given root. Also check that there
is a matching locale dir for each file.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
filename = os.path.basename(path)
ignore = lambda pattern: fnmatch.fnmatchcase(filename, pattern)
return any(ignore(pattern) for pattern in ignore_patterns)
dir_suffix = '%s*' % os.sep
norm_patterns = [p[:-len(dir_suffix)] if p.endswith(dir_suffix) else p for p in self.ignore_patterns]
all_files = []
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s\n' % dirname)
elif dirname == 'locale':
dirnames.remove(dirname)
self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
for filename in filenames:
file_path = os.path.normpath(os.path.join(dirpath, filename))
if is_ignored(file_path, self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
locale_dir = None
for path in self.locale_paths:
if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
locale_dir = path
break
if not locale_dir:
locale_dir = self.default_locale_path
if not locale_dir:
raise CommandError(
"Unable to find a locale path to store translations for file %s" % file_path)
all_files.append(TranslatableFile(dirpath, filename, locale_dir))
return sorted(all_files)
def write_po_file(self, potfile, locale):
"""
Creates or updates the PO file for self.domain and :param locale:.
Uses contents of the existing :param potfile:.
        Uses the msgmerge and msgattrib GNU gettext utilities.
"""
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % str(self.domain))
if os.path.exists(pofile):
args = ['msgmerge', '-q']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.extend([pofile, potfile])
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
else:
with open(potfile, 'r') as fp:
msgs = fp.read()
if not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with open(pofile, 'w') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib', '-o', pofile, '--no-obsolete']
if self.wrap:
args.append(self.wrap)
if self.location:
args.append(self.location)
args.append(pofile)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with io.open(django_po, 'rU', encoding='utf-8') as fp:
m = plural_forms_re.search(fp.read())
if m:
plural_form_line = force_str(m.group('value'))
if self.verbosity > 1:
self.stdout.write("copying plural forms: %s\n" % plural_form_line)
lines = []
found = False
for line in msgs.split('\n'):
if not found and (not line or plural_forms_re.search(line)):
line = '%s\n' % plural_form_line
found = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2017_10_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
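    # A minimal usage sketch (how the client is built is an assumption here;
    # this operation group is normally reached through a NetworkManagementClient
    # instance rather than constructed directly):
    #     poller = client.connection_monitors.begin_create_or_update(
    #         'my-rg', 'my-watcher', 'my-monitor', parameters)
    #     result = poller.result()  # blocks until the LRO reaches a terminal state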
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
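    # Sketch of saving and resuming a delete LRO (assumed client and resource
    # names). LROPoller.continuation_token() comes from azure.core.polling.
    #
    #   poller = client.connection_monitors.begin_delete(
    #       "my-rg", "my-watcher", "my-monitor")
    #   token = poller.continuation_token()  # persist across restarts
    #   resumed = client.connection_monitors.begin_delete(
    #       "my-rg", "my-watcher", "my-monitor", continuation_token=token)
    #   resumed.wait()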
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
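    # Paging sketch (assumed client): list() returns an ItemPaged iterable that
    # lazily follows each response's next_link, so iteration transparently
    # spans pages.
    #
    #   for monitor in client.connection_monitors.list("my-rg", "my-watcher"):
    #       print(monitor.name)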
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verify that memory usage is minimal in eager mode."""
import gc
import time
from absl.testing import parameterized
import six
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import internal
# memory_profiler might not be available in the OSS version of TensorFlow.
try:
import memory_profiler # pylint:disable=g-import-not-at-top
except ImportError:
memory_profiler = None
class MemoryCleanupTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(MemoryCleanupTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
def assertMemoryNotIncreasing(self, f, num_iters, max_increase_mb):
"""Assert memory usage doesn't increase beyond given threshold for f."""
# Warm up.
f()
# Wait for background threads to start up and allocate memory.
time.sleep(4)
initial = memory_profiler.memory_usage(-1)[0]
for _ in six.moves.range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
logging.info("Memory increase observed: %f MB" % increase)
assert increase < max_increase_mb, (
"Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
"Maximum allowed increase: %f") % (initial, increase, max_increase_mb)
def assertNoMemoryLeak(self, dataset_fn):
"""Assert consuming elements from the dataset does not leak memory."""
def run():
get_next = self.getNext(dataset_fn())
for _ in range(100):
self.evaluate(get_next())
for _ in range(10):
run()
gc.collect()
tensors = [
o for o in gc.get_objects() if isinstance(o, internal.NativeObject)
]
self.assertEmpty(tensors, "%d Tensors are still alive." % len(tensors))
@combinations.generate(test_base.eager_only_combinations())
def testEagerMemoryUsageWithReset(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
def f():
self.evaluate(multi_device_iterator.get_next())
multi_device_iterator._eager_reset()
self.assertMemoryNotIncreasing(f, num_iters=50, max_increase_mb=250)
@combinations.generate(test_base.eager_only_combinations())
def testEagerMemoryUsageWithRecreation(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
def f():
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.get_next())
del multi_device_iterator
# TODO(b/123316347): Reduce threshold once bug is fixed.
self.assertMemoryNotIncreasing(f, num_iters=50, max_increase_mb=250)
@combinations.generate(test_base.eager_only_combinations())
def testFilter(self):
def get_dataset():
def fn(_):
return True
return dataset_ops.Dataset.range(0, 100).filter(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(combinations.combine(tf_api_version=1, mode="eager"))
def testFilterLegacy(self):
def get_dataset():
def fn(_):
return True
return dataset_ops.Dataset.range(0, 100).filter_with_legacy_function(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(test_base.eager_only_combinations())
def testFlatMap(self):
def get_dataset():
def fn(x):
return dataset_ops.Dataset.from_tensors(x * x)
return dataset_ops.Dataset.range(0, 100).flat_map(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(test_base.eager_only_combinations())
def testFromGenerator(self):
def get_dataset():
def fn():
return six.moves.range(100)
return dataset_ops.Dataset.from_generator(fn, output_types=dtypes.float32)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(num_parallel_calls=[None, 10])))
def testMap(self, num_parallel_calls):
def get_dataset():
def fn(x):
return x * x
return dataset_ops.Dataset.range(0, 100).map(
fn, num_parallel_calls=num_parallel_calls)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.combine(
tf_api_version=1, mode="eager", num_parallel_calls=[None, 10]))
def testMapLegacy(self, num_parallel_calls):
def get_dataset():
def fn(x):
return x * x
return dataset_ops.Dataset.range(0, 100).map_with_legacy_function(
fn, num_parallel_calls=num_parallel_calls)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(num_parallel_calls=[None, 10])))
def testInterleave(self, num_parallel_calls):
def get_dataset():
def fn(x):
return dataset_ops.Dataset.from_tensors(x * x)
return dataset_ops.Dataset.range(0, 100).interleave(
fn, num_parallel_calls=num_parallel_calls, cycle_length=10)
self.assertNoMemoryLeak(get_dataset)
if __name__ == "__main__":
test.main()
| |
#Game class
# The main class which will provide the framework for the rest of the
# game objects to run in.
# Includes the main event loop, clock, window settings, etc.
# Requires pygame
import pygame
from engine import *
from eventManager import *
from widgets import *
from ball import *
from brick import *
from paddle import *
class Game(Engine):
def __init__(self):
# pass a reference to the game to the Engine
# this will allow easy access to other things in the Engine
# that some widgets need (e.g., the Options class)
super().__init__(self)
# initialize the Ticker
self.ticker = Engine.Ticker(self)
# register event listeners
event = Events.NewGameEvent()
self.eventManager.addListener(event, self)
event = Events.ShowOptionsEvent()
self.eventManager.addListener(event, self)
event = Events.ApplyOptionsEvent()
self.eventManager.addListener(event, self)
event = Events.ResetValuesToDefaultsEvent()
self.eventManager.addListener(event, self)
event = Events.CancelOptionsEvent()
self.eventManager.addListener(event, self)
event = Events.PauseGameEvent()
self.eventManager.addListener(event, self)
event = Events.UnpauseGameEvent()
self.eventManager.addListener(event, self)
event = Events.GameOverEvent()
self.eventManager.addListener(event, self)
event = Events.LevelCompleteEvent()
self.eventManager.addListener(event, self)
event = Events.ShowStartEvent()
self.eventManager.addListener(event, self)
def postEvent(self, event):
        # make a closure so we can abstract an event poster rather than having to write a separate method to post each type of event
def generateEventPoster():
e = event()
self.eventManager.post(e)
return generateEventPoster
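    # Illustration of the closure above: each postEvent() call binds a
    # different event class, so widget wiring stays a one-liner, e.g.
    #
    #   action = self.postEvent(Events.QuitEvent)  # returns a callable
    #   action()  # instantiates and posts a QuitEvent when invoked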
def makeLevel(self):
screen = Engine.Level(mouseVisible = False)
screen.score = self.options.score
screen.setBackgroundImage(self.options.levelBackgroundImage)
screen.soundAmbient = self.options.levelSoundAmbient
screen.soundAmbientStartAt = 103
screen.soundVolumeAmbient = 0.75
wall = Brick.createWall(self.options)
screen.addWidget(wall)
ball = Ball(screen)
screen.addWidget(ball)
paddle = Paddle()
screen.addWidget(paddle)
ballTracker = StatTracker(self.Stats.BALLS_REMAINING, self.options.ballsRemaining, backgroundColor = self.Colors.WHITE_TRANSLUCENT, transparentBackground = False)
ballTracker.leftEdge(self.options.levelZoneInfo.leftEdge())
ballTracker.bottomEdge(self.options.levelZoneInfo.bottomEdge())
screen.addWidget(ballTracker)
scoreTracker = StatTracker(self.Stats.SCORE, screen.score)
scoreTracker.rightEdge(self.options.levelZoneInfo.rightEdge() - 25)
scoreTracker.bottomEdge(self.options.levelZoneInfo.bottomEdge())
screen.addWidget(scoreTracker)
return screen
def makeLevelComplete(self):
screen = Engine.Layer()
screen.setFillColor(self.Colors.WHITE_TRANSLUCENT)
screen.soundAmbient = self.options.levelCompleteSoundAmbient
gameOver = Label("Level Complete")
gameOver.centerOn(self.window)
screen.addWidget(gameOver)
action = self.postEvent(Events.NewGameEvent)
continueButton = Button("Continue", buttonColor = Engine.Colors.GREY, onClickAction = action)
continueButton.setPosition(50, 500)
screen.addWidget(continueButton)
action = self.postEvent(Events.ShowStartEvent)
mainButton = Button("Main Menu", buttonColor = Engine.Colors.GREY, onClickAction = action)
mainButton.centerOn(self.window)
mainButton.setPosition(y = 500)
screen.addWidget(mainButton)
action = self.postEvent(Events.QuitEvent)
quitButton = Button("Quit", buttonColor = Engine.Colors.GREY, onClickAction = action)
quitButton.setPosition(625, 500)
screen.addWidget(quitButton)
return screen
def makeOptions(self):
screen = Engine.Layer()
title = Label("Game Options", self.Colors.BLACK)
title.setPosition(34, 27)
screen.addWidget(title)
sensitivityLabel = Label("Sensitivity", self.Colors.BLACK)
sensitivityLabel.setPosition(50, 100)
screen.addWidget(sensitivityLabel)
sensitivity = SliderWidget("sensitivity", self.options.availableSensitivities, self.options.sensitivityValue)
sensitivity.setPosition(530, sensitivityLabel.rect.y + 20)
screen.addWidget(sensitivity)
difficultyLabel = Label("Difficulty", self.Colors.BLACK)
difficultyLabel.setPosition(50, 200)
screen.addWidget(difficultyLabel)
difficulty = SliderWidget("difficulty", self.options.availableDifficulties, self.options.difficultyValue)
difficulty.setPosition(530, difficultyLabel.rect.y + 20)
screen.addWidget(difficulty)
volumeAmbientLabel = Label("Volume: Ambient", self.Colors.BLACK) #, fontSize = 36
volumeAmbientLabel.setPosition(50, 300)
screen.addWidget(volumeAmbientLabel)
volumeAmbient = SliderWidget("volumeAmbient", self.options.availableVolumes, self.options.defaultVolumeAmbient)
volumeAmbient.setPosition(530, volumeAmbientLabel.rect.y + 20)
screen.addWidget(volumeAmbient)
volumeEffectsLabel = Label("Volume: Effects", self.Colors.BLACK)
volumeEffectsLabel.setPosition(50, 400)
screen.addWidget(volumeEffectsLabel)
volumeEffects = SliderWidget("volumeEffects", self.options.availableVolumes, self.options.defaultVolumeEffects)
volumeEffects.setPosition(530, volumeEffectsLabel.rect.y + 20)
screen.addWidget(volumeEffects)
action = self.postEvent(Events.ApplyOptionsEvent)
saveButton = Button("Apply", buttonColor = self.Colors.LIGHT_GREY, onClickAction = action)
saveButton.setPosition(34, 500)
screen.addWidget(saveButton)
action = self.postEvent(Events.ResetValuesToDefaultsEvent)
defaultsButton = Button("Defaults", buttonColor = self.Colors.LIGHT_GREY, onClickAction = action)
defaultsButton.centerOn(self.window)
defaultsButton.setPosition(y = 500)
screen.addWidget(defaultsButton)
action = self.postEvent(Events.CancelOptionsEvent)
cancelButton = Button("Cancel", buttonColor = self.Colors.LIGHT_GREY, onClickAction = action)
cancelButton.setPosition(591, 500)
screen.addWidget(cancelButton)
return screen
def makePause(self):
screen = self.makeOptions()
screen.setFillColor(self.Colors.WHITE_TRANSLUCENT)
action = self.postEvent(Events.ShowStartEvent)
mainMenuButton = Button("Main Menu", buttonColor = self.Colors.LIGHT_GREY, onClickAction = action)
mainMenuButton.setPosition(517, 27)
screen.addWidget(mainMenuButton)
return screen
def makeStart(self):
screen = Engine.Layer()
screen.setBackgroundImage(self.options.backgroundImage)
screen.soundAmbient = self.options.soundAmbient
title = Label(self.options.name, self.Colors.DARK_BLUE, fontSize = 256)
title.centerOn(self.window)
title.setPosition(y = 200)
screen.addWidget(title)
action = self.postEvent(Events.NewGameEvent)
startButton = Button("Start", buttonColor = Engine.Colors.GREY, onClickAction = action)
startButton.setPosition(50, 500)
screen.addWidget(startButton)
action = self.postEvent(Events.ShowOptionsEvent)
optionsButton = Button("Options", onClickAction = action)
optionsButton.centerOn(self.window)
optionsButton.setPosition(y = 500)
screen.addWidget(optionsButton)
action = self.postEvent(Events.QuitEvent)
quitButton = Button("Quit", onClickAction = action)
quitButton.setPosition(625, 500)
screen.addWidget(quitButton)
return screen
def makeGameOver(self):
screen = Engine.Layer()
screen.setFillColor(self.Colors.WHITE_TRANSLUCENT)
gameOver = Label("Game Over")
gameOver.centerOn(self.window)
screen.addWidget(gameOver)
action = self.postEvent(Events.ShowStartEvent)
mainButton = Button("Main Menu", buttonColor = Engine.Colors.GREY, onClickAction = action)
mainButton.setPosition(50, 500)
screen.addWidget(mainButton)
action = self.postEvent(Events.QuitEvent)
quitButton = Button("Quit", buttonColor = Engine.Colors.GREY, onClickAction = action)
quitButton.setPosition(625, 500)
screen.addWidget(quitButton)
return screen
def showScreen(self, screenName):
screen = self.screens[self.activeScreen]
screen.deactivate()
self.sleep() # allow event processing to occur outside of the pygame engine
self.activeScreen = screenName
screen = self.screens[self.activeScreen]
pygame.mouse.set_visible(screen.mouseVisible)
screen.activate()
screen.playAmbientAudio()
def getCurrentStatValue(self, stat):
if stat == self.Stats.BALLS_REMAINING:
value = self.options.ballsRemaining
elif stat == self.Stats.SCORE:
value = self.options.score
else:
value = 0
print("Unknown stat:", stat, "passed to Game.getCurrentStatValue(). Returning 0.")
if "level" in self.screens.keys():
level = self.screens["level"]
trackers = level.getWidgets(StatTracker)
for tracker in trackers:
if tracker.stat == stat:
value = tracker.value()
return value
def notify(self, event):
if isinstance(event, Events.NewGameEvent):
self.screens["level"] = self.makeLevel()
self.showScreen("level")
if isinstance(event, Events.ShowOptionsEvent):
self.showScreen("options")
if isinstance(event, Events.ApplyOptionsEvent):
screen = self.screens[self.activeScreen]
filterType = SliderWidget
sliders = screen.getWidgets(filterType)
for slider in sliders:
screen.widgetValues[slider.valueKey] = slider.value
self.applyOptions(screen.widgetValues["sensitivity"], screen.widgetValues["difficulty"], screen.widgetValues["volumeAmbient"], screen.widgetValues["volumeEffects"])
if self.activeScreen == "pause":
event = Events.UnpauseGameEvent()
self.eventManager.post(event)
self.showScreen("level")
else:
self.showScreen("start")
if isinstance(event, Events.ResetValuesToDefaultsEvent):
widgets = self.screens[self.activeScreen].getWidgets()
for widget in widgets:
if hasattr(widget, "defaultValue"):
widget.setValue(widget.defaultValue)
if isinstance(event, Events.CancelOptionsEvent):
if self.activeScreen == "pause":
event = Events.UnpauseGameEvent()
self.eventManager.post(event)
self.showScreen("level")
else:
self.showScreen("start")
if isinstance(event, Events.PauseGameEvent):
if self.activeScreen == "level":
self.showScreen("pause")
if isinstance(event, Events.GameOverEvent):
# reset stats to defaults
self.options.ballsRemaining = self.defaults.ballsRemaining
self.options.score = self.defaults.score
self.showScreen("game_over")
if isinstance(event, Events.LevelCompleteEvent):
self.options.ballsRemaining = self.getCurrentStatValue(self.Stats.BALLS_REMAINING)
self.options.score = self.getCurrentStatValue(self.Stats.SCORE)
self.showScreen("level_complete")
if isinstance(event, Events.ShowStartEvent):
# either it's the first run or user quit back to main, either way reset stats to defaults
self.options.ballsRemaining = self.defaults.ballsRemaining
self.options.score = self.defaults.score
self.showScreen("start")
if isinstance(event, Events.UnpauseGameEvent):
level = self.screens["level"]
level.checkIfLevelComplete()
level.checkIfGameOver()
@staticmethod
def launch():
game = Game()
game.screens["start"] = game.makeStart()
game.screens["options"] = game.makeOptions()
game.screens["pause"] = game.makePause()
game.screens["game_over"] = game.makeGameOver()
game.screens["level_complete"] = game.makeLevelComplete()
game.activeScreen = "start"
game.showScreen(game.activeScreen)
game.eventManager.processEvents()
game.end()
Game.launch()
| |
import itertools
import math
from random import randrange
def Primes(max_num):
"""Returns all primes <= 'max_num'."""
is_prime = [True]*(max_num+1)
is_prime[0] = False
if max_num > 0: is_prime[1] = False
  for i in range(2, int(math.sqrt(max_num)) + 1):
f = 2
while i*f <= max_num:
is_prime[i*f] = False
f += 1
return [x for x in range(max_num+1) if is_prime[x]]
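# e.g. Primes(10) == [2, 3, 5, 7]; Primes(1) == [].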
def _TestPrimes():
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
for i in range(50):
actual = Primes(i)
expected = [x for x in primes if x <= i]
assert actual == expected
def IsPrime(num, primes=None):
if num <= 1:
return False
if num == 2 or num == 3:
return True
if num % 2 == 0 or num % 3 == 0:
return False
if primes:
for p in primes: # a sorted and complete array of primes in (1, x)
if num < p*p: return True
if num%p == 0: return False
q = max(primes[-1], 3)
else:
q = 3
while q*q <= num:
if num%q == 0:
return False
q += 2
return True
def _TestIsPrime():
max_num = 10000
primes = Primes(max_num)
for n in range(max_num):
a, b, c = IsPrime(n), IsPrime(n, primes), (n in primes)
    if not (a == b == c):
      print(n, a, b, c)
    assert a == b == c
_small_primes = Primes(1000)
def RabinMiller(n, k):
"""Returns True if n passes k rounds of the Miller-Rabin primality test.
  Returns False if n is proven composite.
Source: http://stackoverflow.com/questions/14613304
"""
if n < 2: return False
for p in _small_primes:
if n < p * p: return True
if n % p == 0: return False
r, s = 0, n - 1
while s % 2 == 0:
r += 1
s //= 2
for _ in range(k):
a = randrange(2, n - 1)
x = pow(a, s, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True
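def _TestRabinMiller():
  # A sketch test in the style of the other _Test helpers: Miller-Rabin never
  # errs on a prime input, and below _small_primes[-1]**2 the function is
  # fully deterministic (trial division), so these asserts are safe.
  for n in range(1000):
    assert RabinMiller(n, 5) == IsPrime(n)
  assert RabinMiller(2147483647, 5)  # 2**31 - 1, a Mersenne prime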
def PrimeFactors(num, primes=None):
"""Returns prime factors and their exponents of the given number."""
assert num > 0
if not primes:
primes = Primes(num)
factors, exponents = [], []
for p in primes:
exp = 0
while num != 1 and num % p == 0:
      num //= p
exp += 1
if exp > 0:
factors.append(p)
exponents.append(exp)
if num == 1:
break
return factors, exponents
def _TestPrimeFactors():
  actual1, actual2 = PrimeFactors(1)
  expected1, expected2 = [], []
  assert actual1 == expected1 and actual2 == expected2
  actual1, actual2 = PrimeFactors(2)
  expected1, expected2 = [2], [1]
  assert actual1 == expected1 and actual2 == expected2
  actual1, actual2 = PrimeFactors(42)
  expected1, expected2 = [2, 3, 7], [1, 1, 1]
  assert actual1 == expected1 and actual2 == expected2
  actual1, actual2 = PrimeFactors(99)
  expected1, expected2 = [3, 11], [2, 1]
  assert actual1 == expected1 and actual2 == expected2
def Products(num, min_divisor=2):
"""Naive method to generate all expressions of 'num' as a product of ints."""
if num == 1:
yield []
for divisor in range(min_divisor, num+1):
if num % divisor == 0:
      for partial in Products(num // divisor, divisor):
yield partial + [divisor]
def _TestProducts():
actual = []
for product in Products(24):
actual.append('*'.join(map(str, product)))
expected = ['3*2*2*2', '6*2*2', '4*3*2', '12*2', '8*3', '6*4', '24']
assert actual == expected
def Combination(n, k):
  return math.factorial(n) // math.factorial(k) // math.factorial(n-k)
def CombinationWithRepetitions(n, k):
  return Combination(n+k-1, k)
def Product(iterable):
p = 1
for n in iterable:
p *= n
return p
def Area(a, b, c):
"""Heron's formula."""
  p = (a + b + c) / 2.0
return math.sqrt(p*(p-a)*(p-b)*(p-c))
def DivisorsInter(factors, exponents):
"""Returns a list of all divisors of the given number (factors, exponents)."""
exp_tuples = [range(e+1) for e in exponents]
divisors = []
for exp in itertools.product(*exp_tuples):
fe = [factors[i]**exp[i] for i in range(len(exp))]
divisors.append(Product(fe))
divisors.sort()
return divisors
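# e.g. DivisorsInter([2, 3], [2, 1]) == [1, 2, 3, 4, 6, 12]: itertools.product
# enumerates every exponent tuple in (0..2) x (0..1) for 12 = 2**2 * 3.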
def Divisors(num, primes=None):
"""Returns a list of all divisors of the given number."""
factors, exponents = PrimeFactors(num, primes)
return DivisorsInter(factors, exponents)
def _TestDivisors():
actual = Divisors(1)
expected = [1]
assert actual == expected
actual = Divisors(2)
expected = [1, 2]
assert actual == expected
actual = Divisors(28)
expected = [1, 2, 4, 7, 14, 28]
assert actual == expected
actual = Divisors(16)
expected = [1, 2, 4, 8, 16]
assert actual == expected
actual = Divisors(108)
expected = [1, 2, 3, 4, 6, 9, 12, 18, 27, 36, 54, 108]
assert actual == expected
def GenComposites(primes, max_num):
"""Generates composites (<= 'max_num') using input 'primes' set."""
def Try(factors):
num = Product(factors)
if len(factors) >= 2 and num <= max_num:
yield num, factors
for p in primes: # primes are sorted
if num*p > max_num:
break
if len(factors) == 0 or p >= factors[-1]:
for n, f in Try(factors+[p]):
yield n, f
return Try([])
def _TestGenComposites():
actual = [''.join(map(str, f)) for n,f in GenComposites([2,3,5], 20)]
expected = ['22', '222', '2222', '223', '225', '23', '233', '25', '33', '35']
assert actual == expected
max_num = 10000
primes = Primes(max_num)
composites = [n for n, f in GenComposites(primes, max_num)]
assert len(composites) == len(set(composites))
assert len(primes) + len(composites) == max_num - 1
def Group(array):
"""Groups duplicate elements, e.g. [2, 1, 2, 2, 3] => [1, 2, 3], [1, 3, 1]."""
array.sort()
uniq, cnts = [], []
for i in array:
if len(uniq) == 0 or i != uniq[-1]:
uniq.append(i)
cnts.append(1)
else:
cnts[-1] += 1
return uniq, cnts
def _TestGroup():
actual1, actual2 = Group([2, 1, 2, 2, 3])
expected1, expected2 = [1, 2, 3], [1, 3, 1]
assert actual1 == expected1
assert actual2 == expected2
def SumOfDivisors(num, primes=None):
"""Sum of positive divisors."""
factors, exponents = PrimeFactors(num, primes)
# http://en.wikipedia.org/wiki/Divisor_function.
sigma = 1
for i in range(len(factors)):
p, a = factors[i], exponents[i]
s = sum([p**e for e in range(a+1)])
sigma *= s
return sigma
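# e.g. SumOfDivisors(12) == (1+2+4) * (1+3) == 28, matching sum(Divisors(12)).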
def _TestSumOfDivisors():
for i in range(1, 100):
expected = sum(Divisors(i))
actual = SumOfDivisors(i)
assert expected == actual
def Flatten(matrix):
"""Flattens a 2d array 'matrix' to an array."""
array = []
for a in matrix:
array += a
return array
def MulMod(a, b, m):
if a >= m:
a %= m
if b >= m:
b %= m
return a*b%m
def PowMod(a, e, m):
"""Deprecated. Use pow(a, e, m) instead."""
if e == 0:
return 1%m
if e == 1:
return a%m
  return MulMod(PowMod(a, e//2, m), PowMod(a, e - e//2, m), m)
def _TestPowMod():
for e in range(20):
assert 2**e%11 == PowMod(2, e, 11)
assert 3**e%11 == PowMod(3, e, 11)
def Permutations(seq):
"""Yield only unique permutations of seq in an efficient way.
http://stackoverflow.com/questions/12836385
"""
# Precalculate the indices we'll be iterating over for speed
i_indices = range(len(seq) - 1, -1, -1)
k_indices = i_indices[1:]
# The algorithm specifies to start with a sorted version
seq = sorted(seq)
while True:
yield seq
# Working backwards from the last-but-one index, k
# we find the index of the first decrease in value. 0 0 1 0 1 1 1 0
for k in k_indices:
if seq[k] < seq[k + 1]:
break
    else:
      # Python's for-else: the else clause runs only if the loop finished
      # without hitting break. In that case seq is weakly decreasing, so
      # every unique permutation has been yielded.
      return
# Get item from sequence only once, for speed
k_val = seq[k]
# Working backwards starting with the last item, k i
# find the first one greater than the one at k 0 0 1 0 1 1 1 0
for i in i_indices:
if k_val < seq[i]:
break
# Swap them in the most efficient way
(seq[k], seq[i]) = (seq[i], seq[k]) # k i
# 0 0 1 1 1 1 0 0
# Reverse the part after but not k
# including k, also efficiently. 0 0 1 1 0 0 1 1
seq[k + 1:] = seq[-1:k:-1]
def _TestPermutations():
perm = set([','.join(p) for p in Permutations(['1', '2', '1'])])
assert perm == set(['1,1,2', '1,2,1', '2,1,1'])
class Memorize(object):
def __init__(self, f):
self.func = f
self.memo = {}
def __call__(self, *args):
    if args not in self.memo:
self.memo[args] = self.func(*args)
return self.memo[args]
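# Usage sketch for the decorator above (Fib is hypothetical):
#
#   @Memorize
#   def Fib(n):
#     return n if n < 2 else Fib(n-1) + Fib(n-2)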
def IsPalindrome(s):
return s == s[::-1]
def ExtendedGcd(a, b):
  # To find just the greatest common divisor of a and b, use math.gcd(a, b).
# This method returns (x, y, gcd), where a*x + b*y = gcd.
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r != 0:
    q = old_r // r
old_r, r = r, old_r - q*r
old_s, s = s, old_s - q*s
old_t, t = t, old_t - q*t
return old_s, old_t, old_r
def _TestExtendedGcd():
a, b = 240, 46
x, y, c = ExtendedGcd(a, b)
assert (x, y, c) == (-9, 47, 2)
a, b = 3, 11
x, y, c = ExtendedGcd(a, b)
assert (x, y, c) == (4, -1, 1)
if __name__ == '__main__':
_TestPrimes()
_TestIsPrime()
_TestPrimeFactors()
_TestProducts()
_TestDivisors()
_TestGenComposites()
_TestGroup()
_TestSumOfDivisors()
_TestPowMod()
_TestPermutations()
_TestExtendedGcd()
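  _TestRabinMiller()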
  print('pass')
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import datetime
import decimal
from unittest import mock
from oslo_utils import uuidutils
from cloudkitty import dataframe
from cloudkitty.rating import hash
from cloudkitty.rating.hash.db import api
from cloudkitty import tests
TEST_TS = 1388577600
FAKE_UUID = '6c1b8a30-797f-4b7e-ad66-9879b79059fb'
CK_RESOURCES_DATA = [dataframe.DataFrame.from_dict({
"period": {
"begin": datetime.datetime(2014, 10, 1),
"end": datetime.datetime(2014, 10, 1, 1)},
"usage": {
"compute": [
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "f5600101-8fa2-4864-899e-ebcb7ed6b568",
"memory": "64",
"metadata": {
"farm": "prod"},
"name": "prod1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
# Integer rather than a string on purpose
"vcpus": 1},
"vol": {
"qty": 1,
"unit": "instance"}
},
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.tiny",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "512",
"metadata": {
"farm": "dev"},
"name": "dev1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"},
"vol": {
"qty": 2,
"unit": "instance"}},
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.xlarge",
"image_id": "0052f884-461d-4e3a-9598-7e5391888209",
"memory": "16384",
"metadata": {
"farm": "dev"},
"name": "dev1",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
# Integer rather than a string on purpose
"vcpus": 8},
"vol": {
"qty": 2,
"unit": "instance"}},
{
"desc": {
"availability_zone": "nova",
"flavor": "m1.nano",
"image_id": "a41fba37-2429-4f15-aa00-b5bc4bf557bf",
"memory": "64",
"metadata": {
"farm": "dev"},
"name": "dev2",
"project_id": "f266f30b11f246b589fd266f85eeec39",
"user_id": "55b3379b949243009ee96972fbf51ed1",
"vcpus": "1"},
"vol": {
"qty": 1,
"unit": "instance"}}]}}, legacy=True)]
class HashMapRatingTest(tests.TestCase):
def setUp(self):
super(HashMapRatingTest, self).setUp()
self._tenant_id = 'f266f30b11f246b589fd266f85eeec39'
self._db_api = hash.HashMap.db_api
self._db_api.get_migration().upgrade('head')
self._hash = hash.HashMap(self._tenant_id)
# Group tests
@mock.patch.object(uuidutils, 'generate_uuid',
return_value=FAKE_UUID)
def test_create_group(self, patch_generate_uuid):
self._db_api.create_group('test_group')
groups = self._db_api.list_groups()
self.assertEqual([FAKE_UUID], groups)
patch_generate_uuid.assert_called_once_with()
def test_create_duplicate_group(self):
self._db_api.create_group('test_group')
self.assertRaises(api.GroupAlreadyExists,
self._db_api.create_group,
'test_group')
def test_delete_group(self):
group_db = self._db_api.create_group('test_group')
self._db_api.delete_group(group_db.group_id)
groups = self._db_api.list_groups()
self.assertEqual([], groups)
def test_delete_unknown_group(self):
self.assertRaises(api.NoSuchGroup,
self._db_api.delete_group,
uuidutils.generate_uuid())
def test_recursive_delete_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
self._db_api.delete_group(group_db.group_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id)
self.assertEqual([], mappings)
groups = self._db_api.list_groups()
self.assertEqual([], groups)
def test_non_recursive_delete_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
self._db_api.delete_group(group_db.group_id, False)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id)
self.assertEqual([mapping_db.mapping_id], mappings)
groups = self._db_api.list_groups()
self.assertEqual([], groups)
new_mapping_db = self._db_api.get_mapping(mapping_db.mapping_id)
self.assertIsNone(new_mapping_db.group_id)
def test_list_mappings_from_only_group(self):
service_db = self._db_api.create_service('compute')
group_db = self._db_api.create_group('test_group')
mapping_tiny = self._db_api.create_mapping(
cost='1.337',
map_type='flat',
service_id=service_db.service_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
cost='42',
map_type='flat',
service_id=service_db.service_id)
mappings = self._db_api.list_mappings(group_uuid=group_db.group_id)
self.assertEqual([mapping_tiny.mapping_id],
mappings)
def test_list_mappings_from_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
mapping_tiny = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
mapping_small = self._db_api.create_mapping(
value='m1.small',
cost='3.1337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='m1.large',
cost='42',
map_type='flat',
field_id=field_db.field_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id,
group_uuid=group_db.group_id)
self.assertEqual([mapping_tiny.mapping_id,
mapping_small.mapping_id],
mappings)
def test_list_mappings_without_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='m1.small',
cost='3.1337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id)
mapping_no_group = self._db_api.create_mapping(
value='m1.large',
cost='42',
map_type='flat',
field_id=field_db.field_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id,
no_group=True)
self.assertEqual([mapping_no_group.mapping_id],
mappings)
# Service tests
@mock.patch.object(uuidutils, 'generate_uuid',
return_value=FAKE_UUID)
def test_create_service(self, patch_generate_uuid):
self._db_api.create_service('compute')
services = self._db_api.list_services()
self.assertEqual([FAKE_UUID], services)
patch_generate_uuid.assert_called_once_with()
def test_create_duplicate_service(self):
self._db_api.create_service('compute')
self.assertRaises(api.ServiceAlreadyExists,
self._db_api.create_service,
'compute')
def test_delete_service_by_name(self):
self._db_api.create_service('compute')
self._db_api.delete_service('compute')
services = self._db_api.list_services()
self.assertEqual([], services)
def test_delete_service_by_uuid(self):
service_db = self._db_api.create_service('compute')
self._db_api.delete_service(uuid=service_db.service_id)
services = self._db_api.list_services()
self.assertEqual([], services)
def test_delete_unknown_service_by_name(self):
self.assertRaises(api.NoSuchService,
self._db_api.delete_service,
'dummy')
def test_delete_unknown_service_by_uuid(self):
self.assertRaises(
api.NoSuchService,
self._db_api.delete_service,
uuid='6e8de9fc-ee17-4b60-b81a-c9320e994e76')
# Field tests
def test_create_field_in_existing_service(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
fields = self._db_api.list_fields(service_db.service_id)
self.assertEqual([field_db.field_id], fields)
def test_create_duplicate_field(self):
service_db = self._db_api.create_service('compute')
self._db_api.create_field(service_db.service_id,
'flavor')
self.assertRaises(api.FieldAlreadyExists,
self._db_api.create_field,
service_db.service_id,
'flavor')
def test_delete_field(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id, 'flavor')
self._db_api.delete_field(field_db.field_id)
services = self._db_api.list_services()
self.assertEqual([service_db.service_id], services)
fields = self._db_api.list_fields(service_db.service_id)
self.assertEqual([], fields)
def test_delete_unknown_field(self):
self.assertRaises(api.NoSuchField,
self._db_api.delete_field,
uuidutils.generate_uuid())
def test_recursive_delete_field_from_service(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
self._db_api.delete_service(uuid=service_db.service_id)
self.assertRaises(api.NoSuchField,
self._db_api.get_field,
field_db.field_id)
# Mapping tests
def test_create_mapping(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id)
self.assertEqual([mapping_db.mapping_id], mappings)
def test_get_mapping(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
mapping = self._db_api.get_mapping(mapping_db.mapping_id)
self.assertEqual('flat', mapping.map_type)
self.assertEqual('m1.tiny', mapping.value)
self.assertEqual(
decimal.Decimal('1.3369999999999999662492200514'), mapping.cost)
self.assertEqual(field_db.id, mapping.field_id)
def test_list_mappings_from_services(self):
service_db = self._db_api.create_service('compute')
mapping_db = self._db_api.create_mapping(
cost='1.337',
map_type='flat',
service_id=service_db.service_id)
mappings = self._db_api.list_mappings(
service_uuid=service_db.service_id)
self.assertEqual([mapping_db.mapping_id], mappings)
def test_list_mappings_from_fields(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
mappings = self._db_api.list_mappings(
field_uuid=field_db.field_id)
self.assertEqual([mapping_db.mapping_id], mappings)
def test_create_mapping_with_incorrect_type(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
self.assertRaises(api.NoSuchType,
self._db_api.create_mapping,
value='m1.tiny',
cost='1.337',
map_type='invalid',
field_id=field_db.field_id)
def test_create_mapping_with_two_parents(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
self.assertRaises(api.ClientHashMapError,
self._db_api.create_mapping,
value='m1.tiny',
cost='1.337',
map_type='flat',
service_id=service_db.service_id,
field_id=field_db.field_id)
def test_update_mapping(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
new_mapping_db = self._db_api.update_mapping(
uuid=mapping_db.mapping_id,
value='42',
map_type='rate')
self.assertEqual('42', new_mapping_db.value)
self.assertEqual('rate', new_mapping_db.map_type)
def test_update_mapping_inside_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
group_db = self._db_api.create_group('test_group')
new_mapping_db = self._db_api.update_mapping(
mapping_db.mapping_id,
value='42',
map_type='rate',
group_id=group_db.group_id)
self.assertEqual('42', new_mapping_db.value)
self.assertEqual('rate', new_mapping_db.map_type)
self.assertEqual(group_db.id, new_mapping_db.group_id)
def test_delete_mapping(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
self._db_api.delete_mapping(mapping_db.mapping_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id)
self.assertEqual([], mappings)
def test_create_per_tenant_mapping(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
mappings = self._db_api.list_mappings(field_uuid=field_db.field_id)
self.assertEqual(
self._tenant_id,
mapping_db.tenant_id)
self.assertEqual([mapping_db.mapping_id], mappings)
def test_list_mappings_filtering_on_tenant(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
self._db_api.create_mapping(
value='m1.small',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
mappings = self._db_api.list_mappings(
field_uuid=field_db.field_id,
tenant_uuid=self._tenant_id)
self.assertEqual([mapping_db.mapping_id], mappings)
def test_list_mappings_filtering_on_no_tenant(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'flavor')
mapping_db = self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id)
self._db_api.create_mapping(
value='m1.small',
cost='1.337',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
mappings = self._db_api.list_mappings(
field_uuid=field_db.field_id,
tenant_uuid=None)
self.assertEqual([mapping_db.mapping_id], mappings)
# Threshold tests
def test_create_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
thresholds = self._db_api.list_thresholds(field_uuid=field_db.field_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_get_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='rate',
field_id=field_db.field_id)
threshold = self._db_api.get_threshold(threshold_db.threshold_id)
self.assertEqual('rate', threshold.map_type)
self.assertEqual(decimal.Decimal('64'), threshold.level)
self.assertEqual(
decimal.Decimal('0.1337000000000000132782673745'), threshold.cost)
self.assertEqual(field_db.id, threshold.field_id)
def test_list_thresholds_from_only_group(self):
service_db = self._db_api.create_service('compute')
group_db = self._db_api.create_group('test_group')
threshold_db = self._db_api.create_threshold(
level=10,
cost='1.337',
map_type='flat',
service_id=service_db.service_id,
group_id=group_db.group_id)
thresholds = self._db_api.list_thresholds(
group_uuid=group_db.group_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_list_thresholds_from_services(self):
service_db = self._db_api.create_service('compute')
threshold_db = self._db_api.create_threshold(
level=10,
cost='1.337',
map_type='flat',
service_id=service_db.service_id)
thresholds = self._db_api.list_thresholds(
service_uuid=service_db.service_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_list_thresholds_from_fields(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
thresholds = self._db_api.list_thresholds(field_uuid=field_db.field_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_create_threshold_with_incorrect_type(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
self.assertRaises(
api.NoSuchType,
self._db_api.create_threshold,
level='64',
cost='0.1337',
map_type='invalid',
field_id=field_db.field_id)
def test_create_threshold_with_two_parents(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
self.assertRaises(
api.ClientHashMapError,
self._db_api.create_threshold,
level='64',
cost='0.1337',
map_type='flat',
service_id=service_db.service_id,
field_id=field_db.field_id)
def test_update_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
new_threshold_db = self._db_api.update_threshold(
uuid=threshold_db.threshold_id,
level='128',
map_type='rate')
self.assertEqual('128', new_threshold_db.level)
self.assertEqual('rate', new_threshold_db.map_type)
def test_update_threshold_inside_group(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
group_db = self._db_api.create_group('test_group')
new_threshold_db = self._db_api.update_threshold(
threshold_db.threshold_id,
group_id=group_db.group_id)
self.assertEqual(group_db.id, new_threshold_db.group_id)
def test_delete_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
self._db_api.delete_threshold(threshold_db.threshold_id)
thresholds = self._db_api.list_thresholds(field_uuid=field_db.field_id)
self.assertEqual([], thresholds)
def test_create_per_tenant_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
thresholds = self._db_api.list_thresholds(field_uuid=field_db.field_id)
self.assertEqual(
self._tenant_id,
threshold_db.tenant_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_list_thresholds_filtering_on_tenant(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
self._db_api.create_threshold(
level='128',
cost='0.2',
map_type='flat',
field_id=field_db.field_id)
thresholds = self._db_api.list_thresholds(
field_uuid=field_db.field_id,
tenant_uuid=self._tenant_id)
self.assertEqual([threshold_db.threshold_id], thresholds)
def test_list_thresholds_filtering_on_no_tenant(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(
service_db.service_id,
'memory')
threshold_db = self._db_api.create_threshold(
level='64',
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
self._db_api.create_threshold(
level='128',
cost='0.2',
map_type='flat',
field_id=field_db.field_id,
tenant_id=self._tenant_id)
thresholds = self._db_api.list_thresholds(
field_uuid=field_db.field_id,
tenant_uuid=None)
self.assertEqual([threshold_db.threshold_id], thresholds)
# Processing tests
def _generate_hashmap_rules(self):
mapping_list = []
threshold_list = []
service_db = self._db_api.create_service('compute')
flavor_field = self._db_api.create_field(service_db.service_id,
'flavor')
memory_field = self._db_api.create_field(service_db.service_id,
'memory')
group_db = self._db_api.create_group('test_group')
mapping_list.append(
self._db_api.create_mapping(
cost='1.42',
map_type='rate',
service_id=service_db.service_id))
mapping_list.append(
self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=flavor_field.field_id))
mapping_list.append(
self._db_api.create_mapping(
value='m1.large',
cost='13.37',
map_type='rate',
field_id=flavor_field.field_id,
group_id=group_db.group_id))
# Per tenant override
mapping_list.append(
self._db_api.create_mapping(
value='m1.tiny',
cost='2',
map_type='flat',
field_id=flavor_field.field_id,
tenant_id=self._tenant_id))
threshold_list.append(
self._db_api.create_threshold(
level='64',
cost='0.02',
map_type='flat',
field_id=memory_field.field_id,
group_id=group_db.group_id))
threshold_list.append(
self._db_api.create_threshold(
level='128',
cost='0.03',
map_type='flat',
field_id=memory_field.field_id,
group_id=group_db.group_id))
threshold_list.append(
self._db_api.create_threshold(
level='64',
cost='0.03',
map_type='flat',
field_id=memory_field.field_id,
group_id=group_db.group_id,
tenant_id=self._tenant_id))
return ([mapping.mapping_id for mapping in mapping_list],
[threshold.threshold_id for threshold in threshold_list])
def test_load_rates(self):
self._generate_hashmap_rules()
self._hash.reload_config()
expect = {
'compute': {
'fields': {
'flavor': {
'mappings': {
'_DEFAULT_': {
'm1.tiny': {
'cost': decimal.Decimal(
'2.0000000000000000000000000000'),
'type': 'flat'}},
'test_group': {
'm1.large': {
'cost': decimal.Decimal(
'13.3699999999999992184029906639'),
'type': 'rate'}}},
'thresholds': {}},
'memory': {
'mappings': {},
'thresholds': {
'test_group': {
64: {
'cost': decimal.Decimal(
'0.0299999999999999988897769754'),
'type': 'flat'},
128: {
'cost': decimal.Decimal(
'0.0299999999999999988897769754'),
'type': 'flat'}}}}},
'mappings': {
'_DEFAULT_': {
'cost': decimal.Decimal(
'1.4199999999999999289457264240'),
'type': 'rate'}},
'thresholds': {}}}
self.assertEqual(expect,
self._hash._entries)
def test_load_mappings(self):
mapping_list = []
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
mapping_list.append(
self._db_api.create_mapping(
value='m1.tiny',
cost='1.337',
map_type='flat',
field_id=field_db.field_id))
mapping_list.append(
self._db_api.create_mapping(
value='m1.large',
cost='13.37',
map_type='rate',
field_id=field_db.field_id,
group_id=group_db.group_id))
mappings_uuid = [mapping.mapping_id for mapping in mapping_list]
result = self._hash._load_mappings(mappings_uuid)
expected_result = {
'_DEFAULT_': {
'm1.tiny': {
'cost': decimal.Decimal('1.3369999999999999662492200514'),
'type': 'flat'}},
'test_group': {
'm1.large': {
'cost': decimal.Decimal('13.3699999999999992184029906639'),
'type': 'rate'}}}
self.assertEqual(expected_result, result)
def test_load_thresholds(self):
threshold_list = []
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'flavor')
group_db = self._db_api.create_group('test_group')
threshold_list.append(
self._db_api.create_threshold(
level='1000',
cost='3.1337',
map_type='flat',
field_id=field_db.field_id,
group_id=group_db.group_id))
thresholds_uuid = [threshold.threshold_id
for threshold in threshold_list]
result = self._hash._load_thresholds(thresholds_uuid)
expected_result = {
'test_group': {
1000: {
'cost': decimal.Decimal('3.1337000000000001520561454527'),
'type': 'flat'}}}
self.assertEqual(expected_result, result)
def test_process_services(self):
service_db = self._db_api.create_service('compute')
group_db = self._db_api.create_group('test_group')
self._db_api.create_mapping(
cost='1.337',
map_type='flat',
service_id=service_db.service_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
cost='1.42',
map_type='flat',
service_id=service_db.service_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_services(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
compute_list = df_dicts[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal(
'2.756999999999999895194946475')}
compute_list[1]['rating'] = {'price': decimal.Decimal(
'5.513999999999999790389892950')}
compute_list[2]['rating'] = {'price': decimal.Decimal(
'5.513999999999999790389892950')}
compute_list[3]['rating'] = {'price': decimal.Decimal(
'2.756999999999999895194946475')}
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
def test_process_fields(self):
service_db = self._db_api.create_service('compute')
flavor_field = self._db_api.create_field(service_db.service_id,
'flavor')
image_field = self._db_api.create_field(service_db.service_id,
'image_id')
group_db = self._db_api.create_group('test_group')
self._db_api.create_mapping(
value='m1.nano',
cost='1.337',
map_type='flat',
field_id=flavor_field.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='a41fba37-2429-4f15-aa00-b5bc4bf557bf',
cost='1.10',
map_type='rate',
field_id=image_field.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='m1.tiny',
cost='1.42',
map_type='flat',
field_id=flavor_field.field_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_fields(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
compute_list = df_dicts[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal(
'1.336999999999999966249220051')}
compute_list[1]['rating'] = {'price': decimal.Decimal(
'2.839999999999999857891452848')}
compute_list[2]['rating'] = {'price': decimal.Decimal('0')}
compute_list[3]['rating'] = {'price': decimal.Decimal(
'1.470700000000000081623596770')}
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
def test_process_fields_no_match(self):
service_db = self._db_api.create_service('compute')
flavor_field = self._db_api.create_field(service_db.service_id,
'flavor')
self._db_api.create_mapping(
value='non-existent',
cost='1.337',
map_type='flat',
field_id=flavor_field.field_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_fields(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
def test_process_field_threshold(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'memory')
self._db_api.create_threshold(
level=64,
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
self._db_api.create_threshold(
level=128,
cost='0.2',
map_type='flat',
field_id=field_db.field_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_fields(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
compute_list = df_dicts[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal(
'0.1337000000000000132782673745')}
compute_list[1]['rating'] = {'price': decimal.Decimal(
'0.4000000000000000222044604926')}
compute_list[2]['rating'] = {'price': decimal.Decimal(
'0.4000000000000000222044604926')}
compute_list[3]['rating'] = {'price': decimal.Decimal(
'0.1337000000000000132782673745')}
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
def test_process_field_threshold_no_match(self):
service_db = self._db_api.create_service('compute')
field_db = self._db_api.create_field(service_db.service_id,
'memory')
self._db_api.create_threshold(
level=32768,
cost='0.1337',
map_type='flat',
field_id=field_db.field_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_fields(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
self.assertEqual([d.as_dict(mutable=True) for d in expected_data],
[d.as_dict(mutable=True) for d in actual_data])
def test_process_service_threshold(self):
service_db = self._db_api.create_service('compute')
self._db_api.create_threshold(
level=1,
cost='0.1',
map_type='flat',
service_id=service_db.service_id)
self._db_api.create_threshold(
level=2,
cost='0.15',
map_type='flat',
service_id=service_db.service_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
for cur_data in expected_data:
for service_name, point in cur_data.iterpoints():
self._hash._res = {}
self._hash.process_services(service_name, point)
actual_data.add_point(
self._hash.add_rating_informations(point), service_name)
actual_data = [actual_data]
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
compute_list = df_dicts[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal(
'0.1000000000000000055511151231')}
compute_list[1]['rating'] = {'price': decimal.Decimal(
'0.1499999999999999944488848769')}
compute_list[2]['rating'] = {'price': decimal.Decimal(
'0.1499999999999999944488848769')}
compute_list[3]['rating'] = {'price': decimal.Decimal(
'0.1000000000000000055511151231')}
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
def test_update_result_flat(self):
self._hash.update_result(
'test_group',
'flat',
1)
self.assertEqual(1, self._hash._res['test_group']['flat'])
self._hash.update_result(
'test_group',
'flat',
0.5)
self.assertEqual(1, self._hash._res['test_group']['flat'])
self._hash.update_result(
'test_group',
'flat',
1.5)
self.assertEqual(1.5, self._hash._res['test_group']['flat'])
def test_update_result_rate(self):
self._hash.update_result(
'test_group',
'rate',
0.5)
self.assertEqual(0.5, self._hash._res['test_group']['rate'])
self._hash.update_result(
'test_group',
'rate',
0.5)
self.assertEqual(0.25, self._hash._res['test_group']['rate'])
self._hash.update_result(
'test_group',
'rate',
1)
self.assertEqual(0.25, self._hash._res['test_group']['rate'])
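    # Illustrative note on the semantics exercised by the two tests above:
    # for 'flat' results update_result appears to keep the maximum cost seen
    # so far, while for 'rate' results it multiplies successive values.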
def test_update_result_threshold(self):
self._hash.update_result(
'test_group',
'flat',
0.01,
0,
True)
self.assertEqual({'level': 0,
'cost': 0.01,
'scope': 'field',
'type': 'flat'},
self._hash._res['test_group']['threshold'])
self._hash.update_result(
'test_group',
'flat',
1,
10,
True)
self.assertEqual({'level': 10,
'cost': 1,
'scope': 'field',
'type': 'flat'},
self._hash._res['test_group']['threshold'])
self._hash.update_result(
'test_group',
'flat',
1.1,
15,
True)
self.assertEqual({'level': 15,
'cost': 1.1,
'scope': 'field',
'type': 'flat'},
self._hash._res['test_group']['threshold'])
self._hash.update_result(
'test_group',
'threshold',
2.2,
10,
True)
self.assertEqual({'level': 15,
'cost': 1.1,
'scope': 'field',
'type': 'flat'},
self._hash._res['test_group']['threshold'])
def test_process_rating(self):
service_db = self._db_api.create_service('compute')
flavor_db = self._db_api.create_field(service_db.service_id,
'flavor')
vcpus_db = self._db_api.create_field(service_db.service_id,
'vcpus')
group_db = self._db_api.create_group('test_group')
second_group_db = self._db_api.create_group('second_test_group')
self._db_api.create_mapping(
cost='1.00',
map_type='flat',
service_id=service_db.service_id)
self._db_api.create_mapping(
value='m1.nano',
cost='1.337',
map_type='flat',
field_id=flavor_db.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='m1.tiny',
cost='1.42',
map_type='flat',
field_id=flavor_db.field_id,
group_id=group_db.group_id)
self._db_api.create_mapping(
value='8',
cost='16.0',
map_type='flat',
field_id=vcpus_db.field_id,
group_id=second_group_db.group_id)
image_db = self._db_api.create_field(service_db.service_id,
'image_id')
self._db_api.create_mapping(
value='a41fba37-2429-4f15-aa00-b5bc4bf557bf',
cost='1.10',
map_type='rate',
field_id=image_db.field_id,
group_id=group_db.group_id)
memory_db = self._db_api.create_field(service_db.service_id,
'memory')
self._db_api.create_threshold(
level=64,
cost='0.15',
map_type='flat',
field_id=memory_db.field_id,
group_id=group_db.group_id)
self._db_api.create_threshold(
level=128,
cost='0.2',
map_type='flat',
field_id=memory_db.field_id,
group_id=group_db.group_id)
self._hash.reload_config()
expected_data = copy.deepcopy(CK_RESOURCES_DATA)
actual_data = dataframe.DataFrame(start=expected_data[0].start,
end=expected_data[0].end)
df_dicts = [d.as_dict(mutable=True) for d in expected_data]
compute_list = df_dicts[0]['usage']['compute']
compute_list[0]['rating'] = {'price': decimal.Decimal(
'2.486999999999999960698104928')}
compute_list[1]['rating'] = {'price': decimal.Decimal(
'5.564000000000000155875312656')}
# 8vcpu mapping * 2 + service_mapping * 1 + 128m ram threshold * 2
compute_list[2]['rating'] = {'price': decimal.Decimal(
'34.40000000000000002220446049')}
compute_list[3]['rating'] = {'price': decimal.Decimal(
'2.635700000000000088840046430')}
actual_data = [self._hash.process(d) for d in expected_data]
self.assertEqual(df_dicts, [d.as_dict(mutable=True)
for d in actual_data])
'''
helper functions
@author: Anas Elghafari
'''
import params
import math
#def read_sequence(s):
# s_stripped = s.strip().replace("-", "")
# params.sequence = s_stripped
# params.seq_as_codons = [params.sequence[i:i+3]
# for i in range(0,len(params.sequence)-2, 3)]
def read_eth_matrix(filename):
'''
parses the substitution frequencies from the ETH matrix in the given
file.
'''
    f = open(filename)
codons = params.order_of_codons_eth
matrix = {}
#each codon is a line in the file:
for row_c in codons:
l = f.readline()
values = l.split()
values = list(map(float, values[1:]))
pairs = zip(codons, values)
for p in pairs:
matrix.setdefault(p[0], {})[row_c] = p[1]
params.eth_codon_subs_probs = matrix
params.eth_codon_insertion_probs = get_eth_insert_probs(matrix)
    #After having set the insertion probs based on the sub probs, we can modify
    #the sub probs to add stop codons.
#First we decrease the original values enough to allow us to add stop
#codon emission probability while still having the emission probs sum to 1
for c1 in params.eth_codon_subs_probs.keys():
for c2 in params.eth_codon_subs_probs.keys():
original_value = params.eth_codon_subs_probs[c1][c2]
params.eth_codon_subs_probs[c1][c2] = _normalize(original_value)
#second: we add the stop codon emission prob
_add_stopcodon_probs("eth")
def get_eth_insert_probs(codon_sub_probs):
'''
    Calculates the codon insertion probabilities based on the ETH matrix.
    If this function is used, the codon insertion probabilities will not be
    uniform; instead, the insertion probability of codon XYZ will correspond
    to the probability of a random codon mutating into XYZ.
    Some tests have shown such insertion probs to be better (i.e. yield
    better alignments) than uniform insertion probs.
'''
raw_insertion_scores = {}
for codon in params.order_of_codons_eth:
l = [codon_sub_probs[x][codon]
for x in params.order_of_codons_eth]
raw_insertion_scores[codon] = sum(l)
normalized_probs = {}
scores_sum = sum(raw_insertion_scores.values())
for codon in params.order_of_codons_eth:
        normalized_probs[codon] = raw_insertion_scores[codon]/scores_sum
    #remove stop codons and record their probability, to know by how much
    #to increase the non-stop-codon probs
stop_codon_sum = normalized_probs['TAA'] + normalized_probs['TAG'] + normalized_probs['TGA']
normalized_probs['TAA'] = 0
normalized_probs['TAG'] = 0
normalized_probs['TGA'] = 0
#print("sum of stop codon probs from eth matrix", stop_codon_sum)
    #first-order approximation of exact renormalization by 1/(1 - stop_codon_sum)
    fix_factor = stop_codon_sum
for codon, p in normalized_probs.items():
normalized_probs[codon] = p * (1+fix_factor)
#print(normalized_probs)
#print("sum" + str(sum(normalized_probs.values())))
return normalized_probs
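#Toy illustration of the column-sum idea above (hypothetical 2-codon matrix,
#ignoring the stop-codon adjustment): if subs['AAA']['AAA'] = 0.7,
#subs['AAA']['AAC'] = 0.3, subs['AAC']['AAA'] = 0.4 and
#subs['AAC']['AAC'] = 0.6, then the raw insertion score of 'AAA' is
#0.7 + 0.4 = 1.1 and that of 'AAC' is 0.3 + 0.6 = 0.9; dividing by the
#total 2.0 gives insertion probs 0.55 and 0.45.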
def read_BLOSUM_matrix(filename):
'''
Parses the aminoacid substitution frequencies from the BLOSUM matrix.
Uses the aminoacid sub probs to calculate codon sub probs, with the
assumption that, if an aminoacid A can be encoded by codons C1, C2, C3,
then all those three encodings are equally likely.
The function also adds the probability that a codon can be substituted
    by a stop codon (see function _add_stopcodon_probs).
'''
f = open(filename, 'r')
bmatrix_raw = []
for line in f:
bmatrix_raw.append(line.split())
f.close()
for i in range(len(bmatrix_raw)):
params.BLOSUM_matrix.append([])
for j in range(len(bmatrix_raw[i])):
params.BLOSUM_matrix[i].append(float(bmatrix_raw[i][j]))
for j in range(len(params.BLOSUM_matrix[i]), 20):
params.BLOSUM_matrix[i].append(float(bmatrix_raw[j][i]))
#print(BLOSUM_matrix)
_calculate_codon_insertion_probs()
_calculate_aminoacid_subs_probs()
_calculate_codon_subs_probs()
_add_stopcodon_probs('blosum')
_set_uniform_distribution()
_set_uniform_dist_stop_codons()
def read_profile(profile_file):
'''
    Parses a profile, such as an acceptor profile.
    Assumes that the first line in the file specifies the header, i.e.
    the order of columns (e.g. A C G T).
    The following lines specify the emission prob for each of A, C, G, T.
    Line x specifies the emission prob at position x (state x).
'''
dists = []
lines = open(profile_file).readlines()
header = lines[0].strip().split("\t")
#print("header now is: " + str(header))
a_index = header.index("A")
t_index = header.index("T")
c_index = header.index("C")
g_index = header.index("G")
for l in lines[1:]:
lparts = l.strip().split("\t")
#print("lparts now is: " + str(lparts))
dists.append(_build_emissions_table(lparts[a_index], lparts[t_index],
lparts[c_index], lparts[g_index]))
return dists
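#Sketch of the profile file layout read_profile assumes (tab-separated,
#illustrative values only; <TAB> stands for a literal tab character):
#   A<TAB>C<TAB>G<TAB>T
#   0.25<TAB>0.25<TAB>0.25<TAB>0.25
#   0.10<TAB>0.40<TAB>0.40<TAB>0.10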
def _build_emissions_table(a_prob, t_prob, c_prob, g_prob):
table = dict()
_add_trios(table, "A", a_prob)
_add_trios(table, "T", t_prob)
_add_trios(table, "C", c_prob)
_add_trios(table, "G", g_prob)
return table
def _add_trios(table, symbol, prob):
p = float(prob)/16
trios = [symbol + x + y for x in ["A", "T", "C", "G"]
for y in ["A", "T", "C", "G"]]
for trio in trios:
table[trio] = p
return
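#Worked example: _add_trios(table, "A", 0.4) assigns 0.4/16 = 0.025 to each
#of the 16 trios starting with "A" ("AAA", "AAT", ..., "AGG"), so that
#together the 16 entries carry the full 0.4 probability of emitting an "A"
#at the profiled position.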
def _calculate_codon_insertion_probs():
'''
This function calculates the insertion probability for each codon, using
the BLOSUM matrix (the frequencies version, NOT THE SCORES version).
    In the BLOSUM frequencies matrix, a cell ij stores a number that
    represents the probability of aminoacid_i being aligned with
    aminoacid_j. Those numbers sum up to 1 for the entire matrix, not for
    each single line.
    For the i_th line (row) in the matrix, the *sum* of the numbers in that
    line represents the probability of aminoacid_i being aligned with any
    other aminoacid, i.e. the probability of that aminoacid occurring at
    all; some aminoacids are more common than others.
    So the sum of each line is the probability of the aminoacid being seen.
We also assume it's the probability of the aminoacid being inserted.
To get the codon insertion prob we divide the aminoacid insertion prob
by the number of codons that encode that aminoacid.
'''
total_prob = 0
for i in range(len(params.BLOSUM_matrix)):
aminoacid = params.aminoacids[i]
aminoacid_prob = sum(params.BLOSUM_matrix[i])
total_prob += aminoacid_prob
#print("current aminoacid prob: " + str(aminoacid_prob))
#print("aminoacid is :" + str(aminoacid))
num_codons = len(params.aminoacids_to_codons[aminoacid])
codon_prob = aminoacid_prob/num_codons
for codon in params.aminoacids_to_codons[aminoacid]:
params.codon_insertion_probs[codon] = codon_prob
#print("total probability: " + str(total_prob))
'''
If we need to change to uniform insertion probs:
p = 1.0/61
for c in params.codons:
params.codon_insertion_probs[c] = p
'''
for sc in params.stop_codons:
params.codon_insertion_probs[sc] = 0
def _calculate_aminoacid_subs_probs():
    #calculate the aminoacid substitution probabilities from the
    #(aminoacid1, aminoacid2) co-occurrence frequencies in the BLOSUM matrix.
    #This is done thus:
    #if aminoacid A co-occurs with B at freq f1, co-occurs with C at freq f2, ... f20, then:
    #the prob of A turning into B (A/B) = frequency(A/B) / frequency(A/anything)
    #prob (A/B) = f1 / (f1 + f2 + ... + f20)
for i in range(len(params.BLOSUM_matrix)):
aminoacid = params.aminoacids[i]
aminoacid_prob = sum(params.BLOSUM_matrix[i])
normalized_row = dict()
for j, freq in enumerate(params.BLOSUM_matrix[i]):
#j is the index of the "target" aminoacid.
normalized_row[params.aminoacids[j]] = (freq/aminoacid_prob)
        params.aminoacid_subs_probs[aminoacid] = normalized_row
    #print("the aminoacid substitution probabilities:")
#for k,v in params.aminoacid_subs_probs.items():
# print(k, v)
# print("\n")
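#Toy illustration (hypothetical 2-aminoacid matrix): if the BLOSUM frequency
#row for aminoacid A is [0.3, 0.1], then A's occurrence prob is 0.4 and the
#normalized substitution probs become A->A = 0.3/0.4 = 0.75 and
#A->B = 0.1/0.4 = 0.25.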
def _calculate_codon_subs_probs():
#calculating the codon substitution probabilities from the aminoacid
#substitution probabilities.
for aa in params.aminoacids:
for c in params.aminoacids_to_codons[aa]:
c_probs = dict()
for aa2 in params.aminoacids:
aa_to_aa2_subs_prob = params.aminoacid_subs_probs[aa][aa2]
num_codons_for_aa2 = len(params.aminoacids_to_codons[aa2])
for c2 in params.aminoacids_to_codons[aa2]:
c_to_c2_prob = _normalize(aa_to_aa2_subs_prob/
num_codons_for_aa2)
c_probs[c2] = c_to_c2_prob
params.codon_subs_probs[c] = c_probs
def _normalize(p):
#normalizes a codon substitution probability to account for
#the probability of a stop codon being emitted.
#Effectively, this function makes every codon sub prob smaller
#to "make room" for the codon/stopcodon sub prob, such that
#all the sub probabilities for a codon still sum to 1.
return p * (1-params.stop_codon_emission_prob)
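#Worked example (with a hypothetical params.stop_codon_emission_prob of
#0.01): _normalize(0.5) returns 0.5 * (1 - 0.01) = 0.495, leaving a total
#mass of 0.01 to be split over the three stop codons by _add_stopcodon_probs
#below.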
def _add_stopcodon_probs(matrix="eth"):
    '''
    Adds the probability of substitution into a stop codon to the
    specified matrix ("blosum" or "eth"). The probability used is
    specified in params.stop_codon_emission_prob.
    '''
if matrix == "blosum":
table = params.codon_subs_probs
else: #eth matrix:
table = params.eth_codon_subs_probs
for c in params.codons:
for sc in params.stop_codons:
#since there are 3 stop codons
table[c][sc] = params.stop_codon_emission_prob/3
def _set_uniform_distribution():
p = 1.0/64
for c in params.codons:
params.uniform_distribution[c] = p
for c in params.stop_codons:
params.uniform_distribution[c] = p
#params.uniform_distribution = {c:p for c in params.codons
# or c in params.stop_codons}
def _set_uniform_dist_stop_codons():
    #distribution for the codon match states that correspond to stop codons;
    #at those states only stop codons can be emitted.
for stop_codon in params.stop_codons:
params.uniform_dist_stop_codons[stop_codon] = float(1)/3
def modify_sequence(seq, insertions):
'''
Adds the alignment markers (---) to the given sequence at
the places specified in the argument 'insertions'.
'''
#print("insertions: " + str(insertions))
seq_modified = ""
s1_index = 0
for i, inser in enumerate(insertions):
seq_modified += seq[s1_index: inser[0]]
seq_modified += "-"*inser[1]
s1_index = inser[0]
seq_modified += seq[s1_index:]
return seq_modified
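#Usage sketch: modify_sequence("ACGTGA", [(2, 3)]) returns "AC---GTGA",
#i.e. a gap of three alignment markers opened after the first two symbols.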
def insertionsCount(path, i):
'''
    function needed for printing out the alignment.
    returns 1 if the state at position i is an insertion state, else 0.
'''
if path[i].startswith("ntI") or path[i].startswith("I"):
return 1
else:
return 0
def deletionsCount(path, i):
'''
    function needed for printing out the alignment.
    locates where the deletions take place and returns their length.
'''
if _isC(2, path[i]) and _isJumpsite(path[i-1]):
return 1
elif _isC(3, path[i]) and _isJumpsite(path[i-1]):
return 2
elif _isJumpsite(path[i]) and _isJumpsite(path[i-1]):
return span_length(path[i-1], path[i])
elif _isC(3, path[i]) and _isC(1, path[i-1]):
return 1
elif _isJumpsite(path[i]) and _isC(1, path[i-1]):
return 2
else:
return 0
def matchCount(path, i):
if path[i].startswith("C"):
return 1
else:
return 0
def intronsCount(path, i):
if (path[i].startswith("BEGIN") or path[i].startswith("acc") or
path[i].startswith("do") or path[i].startswith("END")):
return 1
else:
return 0
def leading_introns_count(path):
return _count_introns(path, "leading")
def trailing_introns_count(path):
return _count_introns(path, "trailing")
def _count_introns(path, side):
    #side is either "leading" or "trailing"
    if side == "leading":
        return path.count("BEGIN2")
    if side == "trailing":
        return path.count("END1")
def acceptor_skipped(path):
'''
takes a path as an argument and returns true if and only if
the "acc" states were skipped over
'''
return not "acc1" in path
def donor_skipped(path):
'''
takes a path as an argument and returns true if and only if
the donor states were skipped over,
i.e. if the path has the transition JUMPSITE_END1
'''
return not "do1" in path
'''
if "END1" in path:
index_end = path.index("END1")
else:
#If the path does not have END1 state, we look at the model-end
#state (end2 state), which is the last state.
index_end = len(path)-1
#if the state prior to the 2st END1 state is a jump state, then the
#donor site has been skipped.
return path[index_end-1].startswith("jump")
'''
def _isC(i, state):
return state.startswith("C") and state[-1] == str(i)
def _isJumpsite(state):
return state.startswith("jump")
def span_length(jumpsiteA, jumpsiteB):
    start = int(jumpsiteA[8:])
    end = int(jumpsiteB[8:])
    return (end - start) * 3
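#Worked example: span_length("jumpsite2", "jumpsite5") returns (5 - 2) * 3
#= 9, the number of nucleotide positions covered by the skipped codons.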
'''
we are not using those two printout functions anymore
def print_sequences_wrapped_firstdraft(s1, s2):
if len(s1) < 101: #no wrapping needed
print(s1)
print(s2)
else:
lasti = 0
for i in range(0, len(s1)-100, 100):
print(s1[i: i+100])
print(s2[i:i+100] + "\n")
lasti = i
print(s1[lasti:])
print(s2[lasti:] + "\n")
def print_sequences_wrapped(s1, s2):
s1_segments = _segment(s1, 100)
s2_segments = _segment(s2, 100)
for seg1, seg2 in zip(s1_segments, s2_segments):
print(seg1)
print(seg2 + "\n")
'''
def _segment(s1, n):
    #split s1 into chunks of length n; the last chunk may be shorter
    return [s1[i:i+n] for i in range(0, len(s1), n)]
def is_frameshift(path, index):
'''
Function returns true if there is a frameshift at the specified
location in the path, e.g. if there is a transition from jump-state to
CM3 state.
'''
s1 = path[index]
s2 = path[index+1]
if ((s1[0:2] == "nt" and
not _nt_insertions_multiple_of_3(path, index))
or
(s1.startswith("jump") and
s2[0] == "C" and s2[-1] == "2")
or
(s1.startswith("jump") and
s2[0] == "C" and s2[-1] == "3")
or
(s1[0] == "C" and s1[-1] == "1" and
s2[0] == "C" and s2[-1] == "3")
or
(s1[0] == "C" and s1[-1] == "1" and
s2[0:4] == "jump")):
return True
else:
return False
def _nt_insertions_multiple_of_3(path, start_index):
#this function does not consider 0 to be a multiple of 3.
#If there are no nt_Insertion states at the specified index,
    #it returns False
#counts the nt states in both directions
#case where there are 0 nts starting from the current position
if not path[start_index].startswith("nt"):
return False
nti_count = 0
for s in path[start_index:]:
if s.startswith("nt"):
nti_count += 1
else:
break
for s in reversed(path[0: start_index]):
if s.startswith("nt"):
nti_count += 1
else:
break
    return (nti_count % 3) == 0
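#Illustrative frameshift case: in a path fragment [..., "jumpsite4", "C42",
#...], the transition from a jump state into a C*2 codon-match state means
#the first nucleotide of the codon was skipped, so is_frameshift returns
#True at the index of "jumpsite4".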
def map_state_to_query_seq_position(path, state_index):
'''
The function takes a path and an index in that path (state)
and returns the location in the sequence that
corresponds to the state.
'''
p = path[0:state_index]
silent_states = [x for x in p if (x.startswith("jump") or
x.endswith("-start"))]
    return len(p) - len(silent_states)
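#Worked example: with path = ["m-start", "jumpsite0", "C01", "C02", "C03"]
#and state_index = 4, the slice path[0:4] contains two silent states
#("m-start" and "jumpsite0"), so the function returns 4 - 2 = 2: state "C03"
#emits the symbol at query position 2. (The path here is a hypothetical
#fragment for illustration.)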
def emits_stop_codon(queryseq, path, i):
'''
Return True iff the states at position i in the path emit a stop codon.
'''
if ((_isC(1, path[i]) and _isC(2, path[i+1]) and _isC(3, path[i+2]))
or
(path[i].startswith("ntI") and path[i+1].startswith("ntI") and
path[i+2].startswith("ntI"))):
seqindex = map_state_to_query_seq_position(path, i)
emitted_segment = queryseq[seqindex: seqindex+3]
if emitted_segment in params.stop_codons:
return True
else:
return False
else:
return False
def lastCodonInTheExon(path, index):
'''
    True if the index marks the start of the last codon in the sequence,
    i.e. what comes after the codon is a donor site.
    '''
    return path[index+3].startswith("do1")
def parse_instance(filename):
'''
    parses an instance from a FASTA file.
    instance means: 1 reference sequence (1st sequence in file) and 1 OR MORE
    query sequences.
    The upstream and downstream intron phases for the reference sequence must
    be indicated by the number of lowercase nucleotides at the beginning/end
    of the ref sequence.
function can handle sequences that are already aligned (i.e. contain ---)
'''
#print("\nCurrently parsing file: " + filename + "\n")
#incoming_phase = int(filename[-3])
#outgoing_phase = int(filename[-1])
#print("\nincoming intron phase: "+ str(incoming_phase))
#print("\noutgoing intron phase: "+ str(outgoing_phase))
f = open(filename)
lines = f.readlines()
ref_original = lines[1].strip()
queries_original = [lines[i].strip() for i in range(3, len(lines), 2)]
queries_names = [lines[i].strip()[1:] for i in range(2, len(lines)-1, 2)]
    incoming_phase = (3 - leading_intron_in_sequence(ref_original)) % 3
outgoing_phase = trailing_intron_in_sequence(ref_original)
#print("\nUpstream intron phase: "+ str(incoming_phase) +
# ". Downstream intron phase: "+ str(outgoing_phase))
ref = ref_original.upper().replace("-", "").replace("\n", "")
queries = [q.upper().replace("-", "").replace("\n", "")
for q in queries_original]
#print("\nIN PARSE_INSTANCE. REF: " + str(ref_original))
#print("\nIN PARSE_INSTANCE. QUERIES: " + str(queries_original))
return (ref_original, queries_original, ref,
queries, incoming_phase, outgoing_phase,
queries_names)
def mark_intronic_region(seq_r, incoming_phase, outgoing_phase):
    ''' a fix for instances where the intronic NTs occurring after the
    splice and before the donor site are not marked correctly;
    ideally, those instances should be fixed at the source'''
leading_introns = leading_intron_in_sequence(seq_r)
trailing_introns = trailing_intron_in_sequence(seq_r)
seq = seq_r[leading_introns: len(seq_r)-trailing_introns]
seq_m = (seq[0:(3-incoming_phase)%3].lower() +
seq[(3-incoming_phase)%3:len(seq)-outgoing_phase] +
seq[len(seq)-outgoing_phase:].lower())
seq_f = (seq_r[0:leading_introns] +
seq_m +
seq_r[len(seq_r)-trailing_introns:])
return seq_f
def pair_to_path(model, pair):
'''
Function takes an aligned pair and deduces the path in the model that
corresponds to that alignment.
    Purpose is to be able to score how good an alignment is by getting the
    likelihood of the corresponding path and comparing it to that of the
    Viterbi path.
'''
max_del_span = model.paramsconfig.maximum_cd
model_name = model.name
ref_raw = pair[0].strip()
query_raw = pair[1].strip()
#print("\nPAIR_TO_PATH, ref_raw: " + ref_raw)
#print("\nPAIR_TO_PATH, query_raw: " + query_raw)
path = [model_name + "-start"]
incoming_intron_symbols = leading_intron_in_sequence(ref_raw)
outgoing_intron_symbols = trailing_intron_in_sequence(ref_raw)
#print("INCOMING INTRON SYMBOLS: " + str(incoming_intron_symbols))
#print("OUTGOING INTRON SYMBOLS: " + str(outgoing_intron_symbols))
intron_length = leading_intron_in_sequence(query_raw)
acc_states_count = len(model.paramsconfig.acc_dists)
path += ['BEGIN2'] * (intron_length - acc_states_count - incoming_intron_symbols)
for i in range(acc_states_count):
path.append('acc' + str(i+1))
if incoming_intron_symbols == 2:
path.append("incoming_intron1")
path.append("incoming_intron2")
if incoming_intron_symbols == 1:
path.append("incoming_intron1")
path_tail = [model_name + "-end"]
trailing_intron_length = trailing_intron_in_sequence(query_raw)
do_states_count = len(model.paramsconfig.do_dists)
    path_tail += ['END1'] * (trailing_intron_length - do_states_count - outgoing_intron_symbols)
#adding the do states in reverse, because this "tail" section will be reversed
#before it gets appended to the path.
for i in range(do_states_count, 0, -1):
path_tail.append("do" + str(i))
if outgoing_intron_symbols == 1:
path_tail.append("outgoing_intron1")
if outgoing_intron_symbols == 2:
path_tail.append("outgoing_intron2")
path_tail.append("outgoing_intron1")
ref_introns_removed = ref_raw[incoming_intron_symbols:
len(ref_raw)-outgoing_intron_symbols]
#print("\nPAIR_TO_PATH, ref introns removed: " + ref_introns_removed)
ref = ref_introns_removed.strip().replace("A", "N").replace("G", "N").replace("C", "N").replace("T", "N")
query = query_raw[intron_length: len(pair[1])-trailing_intron_length]
#print("\nPAIR_TO_PATH, query introns removed: " + query)
query = query.strip().replace("A", "N").replace("G", "N").replace("C", "N").replace("T", "N")
#print("\n length of ref: "+ str(len(ref)) + "\nlength of query: "+ str(len(query)))
#print("in pair_to_path function, ref sequence: "+ ref)
#print("\nquery sequence: " + ref)
#print("intron length: "+ str(intron_length))
#print("trailing intron length "+ str(trailing_intron_length))
#print("query now is: " + query)
codonmatch_indx = 0
jumpsite_indx = 0
    ri = 0
    qi = 0
    while ri < len(ref) and qi < len(query):
segments = (ref[ri:ri+3], query[qi:qi+3])
#print("SEGMENTS NOW: "+ str(segments))
if segments == ("NNN", "NNN"):
path.append("jumpsite" + str(jumpsite_indx))
path.append("C" + str(codonmatch_indx) + "1")
path.append("C" + str(codonmatch_indx) + "2")
path.append("C" + str(codonmatch_indx) + "3")
jumpsite_indx += 1
            codonmatch_indx += 1
qi += 3
ri += 3
elif segments == ("---", "NNN"):
path.append("I" + str(codonmatch_indx -1) + "1")
path.append("I" + str(codonmatch_indx -1) + "2")
path.append("I" + str(codonmatch_indx -1) + "3")
qi += 3
ri += 3
            subsequent_ci = _dashes_ahead(query, qi+3) // 3
for sc_i in range(subsequent_ci):
path.append("I" + str(codonmatch_indx -1) + "1")
path.append("I" + str(codonmatch_indx -1) + "2")
path.append("I" + str(codonmatch_indx -1) + "3")
                qi += 3
                ri += 3
elif segments == ("NNN", "---"):
dashes_ahead = _dashes_ahead(query, qi)
            if dashes_ahead % 3 == 0:
                num_deleted_codons = dashes_ahead // 3
            elif dashes_ahead % 3 == 1:
                num_deleted_codons = (dashes_ahead - 1) // 3
            else:
                num_deleted_codons = (dashes_ahead - 2) // 3
deletion_composition = get_deletion_composition(num_deleted_codons,
max_del_span)
path.append("jumpsite" + str(jumpsite_indx))
for i in deletion_composition[:-1]:
path.append("jumpsite" + str(jumpsite_indx+i))
jumpsite_indx += i
codonmatch_indx += i
jumpsite_indx += deletion_composition[-1]
codonmatch_indx += deletion_composition[-1]
qi += num_deleted_codons*3
ri += num_deleted_codons*3
elif segments == ("NNN", "-NN"):
path.append("jumpsite" + str(jumpsite_indx))
path.append("C" + str(codonmatch_indx) + "2")
path.append("C" + str(codonmatch_indx) + "3")
jumpsite_indx += 1
            codonmatch_indx += 1
qi += 3
ri += 3
elif segments == ("NNN", "--N"):
path.append("jumpsite" + str(jumpsite_indx))
path.append("C" + str(codonmatch_indx) + "3")
jumpsite_indx += 1
            codonmatch_indx += 1
qi += 3
ri += 3
elif segments == ("NNN", "N--"):
path.append("jumpsite" + str(jumpsite_indx))
path.append("C" + str(codonmatch_indx) + "1")
jumpsite_indx += 1
            codonmatch_indx += 1
qi += 3
ri += 3
elif segments == ("NNN", "N-N"):
path.append("jumpsite" + str(jumpsite_indx))
path.append("C" + str(codonmatch_indx) + "1")
path.append("C" + str(codonmatch_indx) + "3")
jumpsite_indx += 1
            codonmatch_indx += 1
qi += 3
ri += 3
elif (segments == ("-NN", "NNN") or
segments == ("-N", "NN") or
segments == ("-", "N")):
path.append("ntI" + str(codonmatch_indx -1))
ri += 1
qi += 1
elif (segments == ("--N", "NNN") or
segments == ("--", "NN")):
path.append("ntI" + str(codonmatch_indx -1))
path.append("ntI" + str(codonmatch_indx -1))
ri += 2
qi += 2
else:
print("Unkown sequence pattern: " + str(segments) +
"\nref index now is: " + str(ri) + "\nquery index now is: " + str(qi))
break
path_tail.append("jumpsite" + str(jumpsite_indx))
path_tail.reverse()
path += path_tail
#print("\nDECODED PATH: " + str(path))
return path
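#Summary of the segment dispatch inside pair_to_path, as implemented above
#(ref segment, query segment):
#  ("NNN", "NNN")                   -> jumpsite + full codon match (C*1..C*3)
#  ("---", "NNN")                   -> whole-codon insertion (I*1..I*3)
#  ("NNN", "---")                   -> codon deletion(s), encoded as jumpsite hops
#  ("NNN", "-NN"/"--N"/"N--"/"N-N") -> partial codon match (frameshifting deletion)
#  ("-NN"/"--N", ...)               -> single-nucleotide insertions (ntI states)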
def leading_intron_in_sequence(s):
'''
    function counts the number of intronic nt occurring at the start
of a sequence
'''
seq = s.strip()
l = 0
for i in seq:
if i in ['a', 'g', 'c', 't']:
            l += 1
else:
break
return l
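#Worked example: leading_intron_in_sequence("agACGTg") returns 2, the number
#of lowercase (intronic) symbols before the first uppercase (exonic) one;
#trailing_intron_in_sequence below applies the same count to the reversed
#sequence and would return 1 here.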
def trailing_intron_in_sequence(s):
'''
    function counts the number of intronic nt occurring at the end
of a sequence
'''
return leading_intron_in_sequence(s[::-1])
def _dashes_ahead(seq, indx):
count = 0
while indx < len(seq) and seq[indx] == "-":
count += 1
indx += 1
return count
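#Worked example: _dashes_ahead("NN--N", 2) returns 2 (two consecutive dashes
#starting at index 2), while _dashes_ahead("NN--N", 0) returns 0.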
def aggregate_ratios(ratios):
results = [0, 0, 0, 0]
for r in ratios:
        if r >= 0.99 and r <= 1.01:
            results[0] += 1
        elif r > 1.01 and r <= 10:
            results[1] += 1
        elif r > 10 and r <= 100:
            results[2] += 1
        elif r > 100:
            results[3] += 1
else:
print("UNEXPECTED VALUE for ratio of viterbi path to original: " +
str(r))
    if len(ratios) != 0:
        arith_avg = float(sum(ratios)) / len(ratios)
#print("\nsum of ratios: "+ str(sum(ratios)))
#print("\nnum of ratios: " + str(len(ratios)))
#print("divided: " + str(arith_avg))
if arith_avg != 0:
log_arith_avg = math.log(arith_avg)
else:
log_arith_avg = 0 #or maybe set to non-defined
else:
arith_avg = 0
log_arith_avg = -1
results.append(arith_avg)
results.append(log_arith_avg)
results.append(harmonic_mean(ratios))
return results
def get_ratio_class(r):
'''
    Categorizing the ratios into 5 classes.
'''
    if r >= 0.99 and r <= 1.01:
        return 1
    elif r > 1.01 and r <= 10:
        return 2
    elif r > 10 and r <= 100:
        return 3
    elif r > 100:
        return 4
else:
return "5 NONDEF"
def calculate_offset(query_seq):
'''
Calculates the needed white space when printing out the alignment.
'''
offset = 0
for i, v in enumerate(query_seq):
if v in ['A', 'C', 'G', 'T']:
offset = i
break
return offset
def harmonic_mean(l):
'''
    Calculates the harmonic mean, which is better suited for handling
    outlier values than the arithmetic mean.
    NOTE: currently stubbed out to return the sentinel value -2; the real
    implementation is kept commented out below.
    '''
    return -2
'''
inverse = [1.0/float(x) for x in l if x!=0]
if sum(inverse) == 0:
return 0
else:
return float(len(inverse))/sum(inverse)
'''
def get_deletion_composition(num_codons_deleted, max_del_span):
'''
    Function that calculates the deletion composition of a multi-codon
    deletion, e.g. assuming the model allows a maximum of 10 codon deletions:
    for a deletion of 6 codons, the result is [6]
    for a deletion of 13 codons, the result is [10, 3]
    for a deletion of 24 codons, the result is [10, 10, 4]
    function is used in deducing a path from a given alignment.
'''
if num_codons_deleted <= max_del_span:
return [num_codons_deleted]
else:
        composition = [max_del_span] * int(num_codons_deleted // max_del_span)
        if num_codons_deleted % max_del_span != 0:
            composition.append(num_codons_deleted % max_del_span)
return composition
def get_exon_start(seq):
'''
Finds the location in a sequence where the exon starts.
'''
if seq.count('A') != 0:
a = seq.index('A')
else:
a = len(seq)
if seq.count('T') != 0:
t = seq.index('T')
else:
t = len(seq)
if seq.count('C') != 0:
c = seq.index('C')
else:
c = len(seq)
if seq.count('G') != 0:
g = seq.index('G')
else:
g = len(seq)
return min([a, t, g, c])
def get_exon_end(seq):
'''
Finds the location in a sequence where the exon ends.
'''
seq = seq[::-1]
    return len(seq) - get_exon_start(seq) - 1
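#Worked example: get_exon_start("acgATGca") returns 3 (index of the first
#uppercase nucleotide) and get_exon_end("acgATGca") returns 5 (index of the
#last uppercase nucleotide).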
def get_underlying_codons(c, exclude_stop_codons):
codons = [c]
if c[0] == 'N':
        codons = [x + c[1] + c[2] for x in ['A', 'T', 'C', 'G']]
    for codon in codons:
        if codon[1] == 'N':
            codons.extend([codon[0] + x + codon[2]
                           for x in ['A', 'T', 'C', 'G']])
    for codon in codons:
        if codon[2] == 'N':
            codons.extend([codon[0] + codon[1] + x
                           for x in ['A', 'T', 'C', 'G']])
if exclude_stop_codons:
return [x for x in codons if x.count('N') == 0
and x not in params.stop_codons]
else:
return [x for x in codons if x.count('N') == 0]
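#Worked example: get_underlying_codons("TNA", False) expands the N and
#returns ["TAA", "TTA", "TCA", "TGA"]; get_underlying_codons("TNA", True)
#additionally drops the stop codons TAA and TGA, returning ["TTA", "TCA"].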
def calculate_average_emissions_table(codons, matrix):
    '''
    this function constructs an emission table (containing substitution
    probs) which is the average of the emission tables of the argument
    'codons'. matrix is either "eth" or "blosum".
    it is meant to be used to calculate the emissions table for a codon
    containing N.
    example: in the case of codon ANG, this function will be called thus:
    ([AAG, ATG, ACG, AGG], matrix). Suppose the substitution prob of AAG/CCC
    is p1, ATG/CCC = p2, ACG/CCC = p3, AGG/CCC = p4;
    then:
    in the resulting table, the value associated with CCC will be the
    arith. average of p1, p2, p3, p4
'''
tables = []
if matrix.lower() == 'blosum':
subs_probs = params.codon_subs_probs
else:
subs_probs = params.eth_codon_subs_probs
for c in codons:
tables.append(subs_probs[c])
result = dict()
for aa in tables[0].keys():
values_for_aa = [t[aa] for t in tables]
avg_value = float(sum(values_for_aa))/len(values_for_aa)
result[aa] = avg_value
return result
def get_emissions_table_N_codon(codon, matrix):
codons = get_underlying_codons(codon, True)
table = calculate_average_emissions_table(codons, matrix)
return table
def table_to_function(table):
#print("KEYS: " + str(len(table.keys())))
#if len(table.keys()) < 61:
# print(table.keys())
def func(c):
if c.count('N') == 0:
            if c in table and table[c] > 0:
return math.log(table[c])
else:
return float("-inf")
else:
underlying_codons = get_underlying_codons(c, False)
probs = [table[x] for x in underlying_codons]
avg_prob = float(sum(probs))/len(probs)
            return math.log(avg_prob) if avg_prob > 0 else float("-inf")
return func
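#Usage sketch (hypothetical toy table; a real table has one entry per
#codon): given table = {"AAA": 0.5, "AAC": 0.0},
#f = table_to_function(table) yields f("AAA") == math.log(0.5) and
#f("AAC") == float("-inf"). For an N-containing codon (with a full table)
#func returns the log of the average prob of its underlying codons.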