gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
from collections import deque
try:
from importlib import lock_held as import_lock_held
except ImportError:
from imp import lock_held as import_lock_held
import os
import sys
import threading
import warnings
from io import StringIO, TextIOBase
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket: zmq.PUB Socket
            the socket on which messages will be sent.
        pipe: bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # pid at construction time, used to detect forked children later
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        # the loop is made current inside the background thread, not here
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        # thread-local storage for per-thread event PUSH sockets
        self._local = threading.local()
        # queue of callables waiting to run in the IO thread
        self._events = deque()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.make_current()
        # start() blocks until io_loop.stop() is scheduled (see stop())
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        # random inproc endpoint name so multiple IOPubThreads can coexist
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for i in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            # disable the pipe entirely rather than run half-configured
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # first frame must carry the shared secret from _setup_pipe_in
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True unless we are running in a forked child of the creating process
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close the event pipe created for the calling thread, if any
        if hasattr(self._local, 'event_pipe'):
            self._local.event_pipe.close()

    def close(self):
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b'')
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
class BackgroundSocket(object):
    """Wrapper around IOPub thread that provides zmq send[_multipart]

    Attribute access other than ``send``/``send_multipart`` is forwarded to
    the wrapped thread's zmq socket (with a DeprecationWarning), so old code
    that treated this as a plain Socket keeps working.
    """
    io_thread = None

    def __init__(self, io_thread):
        self.io_thread = io_thread

    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith('__') and attr.endswith('__'):
            # don't wrap magic methods; raise a normal AttributeError
            # (the original called object.__getattr__, which doesn't exist
            # and produced a confusing AttributeError about 'super')
            raise AttributeError(
                "%s has no attribute %r" % (self.__class__.__name__, attr))
        if hasattr(self.io_thread.socket, attr):
            warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            return getattr(self.io_thread.socket, attr)
        raise AttributeError(
            "%s has no attribute %r" % (self.__class__.__name__, attr))

    def __setattr__(self, attr, value):
        # BUG FIX: original condition was
        #   attr.startswith('__' and attr.endswith('__'))
        # which, due to precedence, called str.startswith(bool) and raised
        # TypeError for every non-'io_thread' attribute assignment.
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
            super(BackgroundSocket, self).__setattr__(attr, value)
        else:
            # everything else is forwarded to the real zmq socket
            warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            setattr(self.io_thread.socket, attr, value)

    def send(self, msg, *args, **kwargs):
        # single-frame send is just a one-element multipart send
        return self.send_multipart([msg], *args, **kwargs)

    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    Output is handed off to an IO Thread
    """

    # timeout for flush to avoid infinite hang
    # in case of misbehavior
    flush_timeout = 10
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic = None
    encoding = 'UTF-8'

    def __init__(self, session, pub_thread, name, pipe=None, echo=None):
        # `name` is the stream name (e.g. 'stdout'); `echo`, if given, is a
        # file-like object that also receives everything written here.
        if pipe is not None:
            warnings.warn("pipe argument to OutStream is deprecated and ignored",
                DeprecationWarning)
        # This is necessary for compatibility with Python built-in streams
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
                DeprecationWarning, stacklevel=2)
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        self.name = name
        self.topic = b'stream.' + py3compat.cast_bytes(name)
        self.parent_header = {}
        # pid at construction time, used to detect writes from forked children
        self._master_pid = os.getpid()
        self._flush_pending = False
        self._subprocess_flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._new_buffer()
        self.echo = None
        if echo:
            if hasattr(echo, 'read') and hasattr(echo, 'write'):
                self.echo = echo
            else:
                raise ValueError("echo argument must be a file like object")

    def _is_master_process(self):
        # True unless running in a forked child
        return os.getpid() == self._master_pid

    def set_parent(self, parent):
        # associate subsequent output with the given parent message
        self.parent_header = extract_header(parent)

    def close(self):
        # drops the thread reference only; does not flush or stop the thread
        self.pub_thread = None

    @property
    def closed(self):
        return self.pub_thread is None

    def _schedule_flush(self):
        """schedule a flush in the IO thread

        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True

        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)
        self.pub_thread.schedule(_schedule_in_thread)

    def flush(self):
        """trigger actual zmq send

        send will happen in the background thread
        """
        if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive():
            # request flush on the background thread
            self.pub_thread.schedule(self._flush)
            # wait for flush to actually get through, if we can.
            # waiting across threads during import can cause deadlocks
            # so only wait if import lock is not held
            if not import_lock_held():
                evt = threading.Event()
                self.pub_thread.schedule(evt.set)
                # and give a timeout to avoid
                if not evt.wait(self.flush_timeout):
                    # write directly to __stderr__ instead of warning because
                    # if this is happening sys.stderr may be the problem.
                    print("IOStream.flush timed out", file=sys.__stderr__)
        else:
            # thread not running (e.g. forked child): flush synchronously
            self._flush()

    def _flush(self):
        """This is where the actual send happens.

        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        self._subprocess_flush_pending = False

        if self.echo is not None:
            try:
                self.echo.flush()
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print("Flush failed: {}".format(e),
                          file=sys.__stderr__)

        data = self._flush_buffer()
        if data:
            # FIXME: this disables Session's fork-safe check,
            # since pub_thread is itself fork-safe.
            # There should be a better way to do this.
            self.session.pid = os.getpid()
            content = {u'name':self.name, u'text':data}
            self.session.send(self.pub_thread, u'stream', content=content,
                parent=self.parent_header, ident=self.topic)

    def write(self, string):
        # mirror the write to the echo stream first, best-effort
        if self.echo is not None:
            try:
                self.echo.write(string)
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print("Write failed: {}".format(e),
                          file=sys.__stderr__)

        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode_type):
                string = string.decode(self.encoding, 'replace')

            is_child = (not self._is_master_process())
            # only touch the buffer in the IO thread to avoid races
            self.pub_thread.schedule(lambda : self._buffer.write(string))
            if is_child:
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if self._subprocess_flush_pending:
                    return
                self._subprocess_flush_pending = True
                # We can not rely on self._io_loop.call_later from a subprocess
                self.pub_thread.schedule(self._flush)
            else:
                self._schedule_flush()

    def writelines(self, sequence):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)

    def writable(self):
        # part of the TextIOBase interface
        return True

    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data.

        This should only be called in the IO thread.
        """
        data = u''
        if self._buffer is not None:
            buf = self._buffer
            self._new_buffer()
            data = buf.getvalue()
            buf.close()
        return data

    def _new_buffer(self):
        # fresh StringIO; the old buffer is drained in _flush_buffer
        self._buffer = StringIO()
| |
# coding=utf-8
"""
Implements PowerManagement functions using /sys/class/power_supply/*
See doc/linux for platform-specific details.
"""
import os
import warnings
from power import common
# sysfs directory exposing one subdirectory per power supply.
POWER_SUPPLY_PATH = '/sys/class/power_supply'
# Fail fast at import time if the sysfs tree is unreadable (e.g. non-Linux
# host or restricted container): every method in this module depends on it.
if not os.access(POWER_SUPPLY_PATH, os.R_OK):
    raise RuntimeError("Unable to read {path}.".format(path=POWER_SUPPLY_PATH))
class PowerManagement(common.PowerManagementBase):
    """PowerManagement implementation backed by the Linux sysfs
    power-supply class (/sys/class/power_supply/*)."""

    @staticmethod
    def power_source_type(supply_path):
        """
        @param supply_path: Path to power supply
        @return: One of common.POWER_TYPE_*
        @raise: Runtime error if type of power source is not supported
        """
        with open(os.path.join(supply_path, 'type'), 'r') as type_file:
            # avoid shadowing the builtin `type`
            supply_type = type_file.readline().strip()
        if supply_type == 'Mains':
            return common.POWER_TYPE_AC
        elif supply_type == 'UPS':
            return common.POWER_TYPE_UPS
        elif supply_type == 'Battery':
            return common.POWER_TYPE_BATTERY
        else:
            raise RuntimeError("Type of {path} ({type}) is not supported".format(path=supply_path, type=supply_type))

    @staticmethod
    def is_ac_online(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if ac is online. Otherwise False
        """
        with open(os.path.join(supply_path, 'online'), 'r') as online_file:
            return online_file.readline().strip() == '1'

    @staticmethod
    def is_battery_present(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if battery is present. Otherwise False
        """
        with open(os.path.join(supply_path, 'present'), 'r') as present_file:
            return present_file.readline().strip() == '1'

    @staticmethod
    def is_battery_discharging(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if battery is discharging. Otherwise False
        """
        # (docstring fixed: it previously claimed to report AC status)
        with open(os.path.join(supply_path, 'status'), 'r') as status_file:
            return status_file.readline().strip() == 'Discharging'

    @staticmethod
    def get_battery_state(supply_path):
        """
        @param supply_path: Path to power supply
        @return: Tuple (energy_full, energy_now, power_now)
        """
        # Prefer energy_* attributes; fall back to charge_* when the driver
        # only exposes charge units.
        try:
            energy_now_file = open(os.path.join(supply_path, 'energy_now'), 'r')
        except IOError:
            energy_now_file = open(os.path.join(supply_path, 'charge_now'), 'r')
        try:
            energy_full_file = open(os.path.join(supply_path, 'energy_full'), 'r')
        except IOError:
            energy_full_file = open(os.path.join(supply_path, 'charge_full'), 'r')
        try:
            with open(os.path.join(supply_path, 'power_now'), 'r') as power_now_file:
                power_now = float(power_now_file.readline().strip())
        except IOError:
            # No power_now attribute: approximate power = current * voltage.
            # NOTE(review): the 10000000 divisor looks 10x off from the usual
            # uA * uV -> uW conversion (1000000) -- confirm against callers.
            with open(os.path.join(supply_path, 'voltage_now'), 'r') as voltage_now_file:
                with open(os.path.join(supply_path, 'current_now'), 'r') as current_now_file:
                    power_now = float(current_now_file.readline().strip()) * float(voltage_now_file.readline().strip()) / 10000000
        with energy_now_file:
            with energy_full_file:
                energy_now = float(energy_now_file.readline().strip())
                energy_full = float(energy_full_file.readline().strip())
        return energy_full, energy_now, power_now

    def get_providing_power_source_type(self):
        """
        Looks through all power supplies in POWER_SUPPLY_PATH.
        If there is an AC adapter online returns POWER_TYPE_AC.
        If there is a discharging battery, returns POWER_TYPE_BATTERY.
        Since the order of supplies is arbitrary, whatever found first is returned.
        """
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                supply_type = self.power_source_type(supply_path)
                if supply_type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.POWER_TYPE_AC
                elif supply_type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        return common.POWER_TYPE_BATTERY
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                # a supply may disappear or expose unreadable attributes;
                # best-effort: skip it and keep scanning
                warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
        # nothing conclusive found; assume mains power
        return common.POWER_TYPE_AC

    def get_low_battery_warning_level(self):
        """
        Looks through all power supplies in POWER_SUPPLY_PATH.
        If there is an AC adapter online, returns LOW_BATTERY_WARNING_NONE.
        Otherwise determines total percentage and time remaining across all attached batteries.
        """
        all_energy_full = []
        all_energy_now = []
        all_power_now = []
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                supply_type = self.power_source_type(supply_path)
                if supply_type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.LOW_BATTERY_WARNING_NONE
                elif supply_type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                        all_energy_full.append(energy_full)
                        all_energy_now.append(energy_now)
                        all_power_now.append(power_now)
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
        try:
            # Remaining charge as a percentage in [0, 100].
            # BUG FIX: was sum(all_energy_full) / sum(all_energy_now) --
            # the inverted ratio, which triggered the early warning while
            # batteries were nearly full instead of nearly empty.
            total_percentage = sum(all_energy_now) / sum(all_energy_full) * 100.0
            # minutes remaining, assuming energy and power share units
            total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
            if total_time <= 10.0:
                return common.LOW_BATTERY_WARNING_FINAL
            elif total_percentage <= 22.0:
                return common.LOW_BATTERY_WARNING_EARLY
            else:
                return common.LOW_BATTERY_WARNING_NONE
        except ZeroDivisionError as e:
            # no discharging batteries found, or power_now reported as 0
            warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
            return common.LOW_BATTERY_WARNING_NONE

    def get_time_remaining_estimate(self):
        """
        Looks through all power sources and returns total time remaining estimate
        or TIME_REMAINING_UNLIMITED if ac power supply is online.
        """
        all_energy_now = []
        all_energy_not_discharging = []
        all_power_now = []
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                supply_type = self.power_source_type(supply_path)
                if supply_type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.TIME_REMAINING_UNLIMITED
                elif supply_type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                        all_energy_now.append(energy_now)
                        all_power_now.append(power_now)
                    elif self.is_battery_present(supply_path) and not self.is_battery_discharging(supply_path):
                        # idle batteries still contribute stored energy,
                        # drained at the average rate of the active ones
                        energy_now = self.get_battery_state(supply_path)[1]
                        all_energy_not_discharging.append(energy_now)
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
        if len(all_energy_now) > 0:
            try:
                return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])\
                    + sum(all_energy_not_discharging) / (sum(all_power_now) / len(all_power_now)) * 60.0
            except ZeroDivisionError as e:
                warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
                return common.TIME_REMAINING_UNKNOWN
        else:
            return common.TIME_REMAINING_UNKNOWN

    def add_observer(self, observer):
        # sysfs provides no change notifications; observing is unsupported
        warnings.warn("Current system does not support observing.")
        pass

    def remove_observer(self, observer):
        # sysfs provides no change notifications; observing is unsupported
        warnings.warn("Current system does not support observing.")
        pass
| |
# -*- test-case-name: vumi.persist.tests.test_riak_manager -*-
"""A manager implementation on top of the riak Python package."""
import json
from riak import RiakClient, RiakObject, RiakMapReduce, RiakError
from vumi.persist.model import Manager, VumiRiakError
from vumi.utils import flatten_generator
def to_unicode(text, encoding='utf-8'):
    """Recursively decode *text* (possibly a tuple of values) to unicode.

    ``None`` passes through unchanged; byte strings are decoded with
    *encoding*; already-unicode values are returned as-is.
    """
    if text is None:
        return None
    if isinstance(text, tuple):
        return tuple(to_unicode(element, encoding) for element in text)
    if isinstance(text, unicode):
        return text
    return text.decode(encoding)
class VumiIndexPage(object):
    """
    Wrapper around a page of index query results.

    Iterating over this object will return the results for the current page.
    """

    def __init__(self, index_page):
        self._index_page = index_page

    def __iter__(self):
        # streaming result sets cannot be iterated through this wrapper
        if self._index_page.stream:
            raise NotImplementedError("Streaming is not currently supported.")
        return (to_unicode(result) for result in self._index_page)

    def __eq__(self, other):
        # delegate equality to the wrapped page object
        return self._index_page.__eq__(other)

    def has_next_page(self):
        """
        Indicate whether there are more results to follow.

        :returns:
            ``True`` if there are more results, ``False`` if this is the last
            page.
        """
        return self._index_page.has_next_page()

    @property
    def continuation(self):
        # continuation token for resuming the query, decoded to unicode
        return to_unicode(self._index_page.continuation)

    # Methods that touch the network.

    def next_page(self):
        """
        Fetch the next page of results.

        :returns:
            A new :class:`VumiIndexPage` object containing the next page of
            results, or ``None`` when this is already the last page.
        """
        if not self.has_next_page():
            return None
        try:
            raw_page = self._index_page.next_page()
        except RiakError as err:
            raise VumiRiakError(err)
        return type(self)(raw_page)
class VumiRiakBucket(object):
    """Thin wrapper around a riak bucket, translating errors and results
    into vumi types."""

    def __init__(self, riak_bucket):
        self._riak_bucket = riak_bucket

    def get_name(self):
        return self._riak_bucket.name

    # Methods that touch the network.

    def get_index(self, index_name, start_value, end_value=None,
                  return_terms=None):
        """Run an index query and return all matching keys as a list."""
        page = self.get_index_page(
            index_name, start_value, end_value, return_terms=return_terms)
        return list(page)

    def get_index_page(self, index_name, start_value, end_value=None,
                       return_terms=None, max_results=None, continuation=None):
        """Run an index query and return a single :class:`VumiIndexPage`."""
        try:
            raw_page = self._riak_bucket.get_index(
                index_name, start_value, end_value, return_terms=return_terms,
                max_results=max_results, continuation=continuation)
        except RiakError as err:
            raise VumiRiakError(err)
        return VumiIndexPage(raw_page)
class VumiRiakObject(object):
    """Thin wrapper around a riak object, delegating every accessor to the
    underlying ``RiakObject`` so the rest of vumi never touches the riak
    client API directly."""

    def __init__(self, riak_obj):
        self._riak_obj = riak_obj

    @property
    def key(self):
        return self._riak_obj.key

    def get_key(self):
        return self.key

    def get_content_type(self):
        return self._riak_obj.content_type

    def set_content_type(self, content_type):
        self._riak_obj.content_type = content_type

    def get_data(self):
        # decoded (e.g. JSON) payload of the object
        return self._riak_obj.data

    def set_data(self, data):
        self._riak_obj.data = data

    def set_encoded_data(self, encoded_data):
        # raw bytes; decoding happens in the underlying riak object
        self._riak_obj.encoded_data = encoded_data

    def set_data_field(self, key, value):
        self._riak_obj.data[key] = value

    def delete_data_field(self, key):
        del self._riak_obj.data[key]

    def get_indexes(self):
        return self._riak_obj.indexes

    def set_indexes(self, indexes):
        self._riak_obj.indexes = indexes

    def add_index(self, index_name, index_value):
        self._riak_obj.add_index(index_name, index_value)

    def remove_index(self, index_name, index_value=None):
        # index_value=None removes all values for the named index
        self._riak_obj.remove_index(index_name, index_value)

    def get_user_metadata(self):
        return self._riak_obj.usermeta

    def set_user_metadata(self, usermeta):
        self._riak_obj.usermeta = usermeta

    def get_bucket(self):
        return VumiRiakBucket(self._riak_obj.bucket)

    # Methods that touch the network.

    def store(self):
        return type(self)(self._riak_obj.store())

    def reload(self):
        return type(self)(self._riak_obj.reload())

    def delete(self):
        return type(self)(self._riak_obj.delete())
class RiakManager(Manager):
    """A persistence manager for the riak Python package."""

    # run generator-based model methods synchronously (blocking manager)
    call_decorator = staticmethod(flatten_generator)

    @classmethod
    def from_config(cls, config):
        """Build a RiakManager from a config dict.

        Manager-level options are popped off; the remainder configures the
        underlying RiakClient.
        """
        config = config.copy()
        bucket_prefix = config.pop('bucket_prefix')
        load_bunch_size = config.pop(
            'load_bunch_size', cls.DEFAULT_LOAD_BUNCH_SIZE)
        mapreduce_timeout = config.pop(
            'mapreduce_timeout', cls.DEFAULT_MAPREDUCE_TIMEOUT)
        transport_type = config.pop('transport_type', 'http')
        store_versions = config.pop('store_versions', None)

        host = config.get('host', '127.0.0.1')
        port = config.get('port')
        prefix = config.get('prefix', 'riak')
        mapred_prefix = config.get('mapred_prefix', 'mapred')
        client_id = config.get('client_id')
        transport_options = config.get('transport_options', {})

        client_args = dict(
            host=host, prefix=prefix, mapred_prefix=mapred_prefix,
            protocol=transport_type, client_id=client_id,
            transport_options=transport_options)
        # only pass port when configured, so the client picks its default
        if port is not None:
            client_args['port'] = port
        client = RiakClient(**client_args)
        # Some versions of the riak client library use simplejson by
        # preference, which breaks some of our unicode assumptions. This makes
        # sure we're using stdlib json which doesn't sometimes return
        # bytestrings instead of unicode.
        client.set_encoder('application/json', json.dumps)
        client.set_encoder('text/json', json.dumps)
        client.set_decoder('application/json', json.loads)
        client.set_decoder('text/json', json.loads)
        return cls(
            client, bucket_prefix, load_bunch_size=load_bunch_size,
            mapreduce_timeout=mapreduce_timeout, store_versions=store_versions)

    def close_manager(self):
        # release the underlying client's connections
        self.client.close()

    def riak_bucket(self, bucket_name):
        # wrap the client bucket, preserving None for missing buckets
        bucket = self.client.bucket(bucket_name)
        if bucket is not None:
            bucket = VumiRiakBucket(bucket)
        return bucket

    def riak_object(self, modelcls, key, result=None):
        """Construct a :class:`VumiRiakObject` for ``modelcls``/``key``.

        If ``result`` (a mapreduce result dict) is given, populate the object
        from it; otherwise initialise fresh JSON data carrying the model
        VERSION.
        """
        bucket = self.bucket_for_modelcls(modelcls)._riak_bucket
        riak_object = VumiRiakObject(RiakObject(self.client, bucket, key))
        if result:
            metadata = result['metadata']
            indexes = metadata['index']
            if hasattr(indexes, 'items'):
                # TODO: I think this is a Riak bug. In some cases
                # (maybe when there are no indexes?) the index
                # comes back as a list, in others (maybe when
                # there are indexes?) it comes back as a dict.
                indexes = indexes.items()
            data = result['data']
            riak_object.set_content_type(metadata['content-type'])
            riak_object.set_indexes(indexes)
            riak_object.set_encoded_data(data)
        else:
            riak_object.set_content_type("application/json")
            riak_object.set_data({'$VERSION': modelcls.VERSION})
        return riak_object

    def store(self, modelobj):
        """Store ``modelobj``, reverse-migrating its data down to the
        version configured in ``store_versions`` (if any)."""
        riak_object = modelobj._riak_object
        modelcls = type(modelobj)
        model_name = "%s.%s" % (modelcls.__module__, modelcls.__name__)
        store_version = self.store_versions.get(model_name, modelcls.VERSION)
        # Run reverse migrators until we have the correct version of the data.
        data_version = riak_object.get_data().get('$VERSION', None)
        while data_version != store_version:
            migrator = modelcls.MIGRATOR(
                modelcls, self, data_version, reverse=True)
            riak_object = migrator(riak_object).get_riak_object()
            data_version = riak_object.get_data().get('$VERSION', None)
        riak_object.store()
        return modelobj

    def delete(self, modelobj):
        modelobj._riak_object.delete()

    def load(self, modelcls, key, result=None):
        """Load the model object for ``key``, migrating its data forward to
        the current model VERSION. Returns ``None`` if no data exists."""
        riak_object = self.riak_object(modelcls, key, result)
        if not result:
            riak_object.reload()
        was_migrated = False

        # Run migrators until we have the correct version of the data.
        while riak_object.get_data() is not None:
            data_version = riak_object.get_data().get('$VERSION', None)
            if data_version == modelcls.VERSION:
                obj = modelcls(self, key, _riak_object=riak_object)
                obj.was_migrated = was_migrated
                return obj
            migrator = modelcls.MIGRATOR(modelcls, self, data_version)
            riak_object = migrator(riak_object).get_riak_object()
            was_migrated = True
        # data was None: the key does not exist
        return None

    def _load_multiple(self, modelcls, keys):
        # load each key, silently dropping missing objects
        objs = (self.load(modelcls, key) for key in keys)
        return [obj for obj in objs if obj is not None]

    def riak_map_reduce(self):
        return RiakMapReduce(self.client)

    def run_map_reduce(self, mapreduce, mapper_func=None, reducer_func=None):
        # optional mapper/reducer callbacks each receive (manager, value)
        results = mapreduce.run(timeout=self.mapreduce_timeout)
        if mapper_func is not None:
            results = [mapper_func(self, row) for row in results]
        if reducer_func is not None:
            results = reducer_func(self, results)
        return results

    def _search_iteration(self, bucket, query, rows, start):
        # fetch one page of search results; return the matching document ids
        results = bucket.search(query, rows=rows, start=start)
        return [doc["id"] for doc in results["docs"]]

    def real_search(self, modelcls, query, rows=None, start=None):
        # with `start` given, return a single page; otherwise accumulate
        # pages (rows at a time) until an empty page is returned
        rows = 1000 if rows is None else rows
        bucket_name = self.bucket_name(modelcls)
        bucket = self.client.bucket(bucket_name)
        if start is not None:
            return self._search_iteration(bucket, query, rows, start)
        keys = []
        new_keys = self._search_iteration(bucket, query, rows, 0)
        while new_keys:
            keys.extend(new_keys)
            new_keys = self._search_iteration(bucket, query, rows, len(keys))
        return keys

    def riak_enable_search(self, modelcls):
        bucket_name = self.bucket_name(modelcls)
        bucket = self.client.bucket(bucket_name)
        return bucket.enable_search()

    def riak_search_enabled(self, modelcls):
        bucket_name = self.bucket_name(modelcls)
        bucket = self.client.bucket(bucket_name)
        return bucket.search_enabled()

    def should_quote_index_values(self):
        # index values are passed to the riak client unquoted
        return False

    def purge_all(self):
        """Delete every key in every bucket under our bucket_prefix."""
        buckets = self.client.get_buckets()
        for bucket in buckets:
            if bucket.name.startswith(self.bucket_prefix):
                for key in bucket.get_keys():
                    obj = bucket.get(key)
                    obj.delete()
                bucket.clear_properties()
| |
# -*- coding: utf-8 -*-
import mock
import unittest
from nose.tools import * # noqa
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from framework.auth import Auth
from website.addons.github.exceptions import NotFoundError
from website.addons.github import settings as github_settings
from website.addons.github.exceptions import TooBigToRenderError
from website.addons.github.tests.factories import GitHubOauthSettingsFactory
from website.addons.github.model import AddonGitHubUserSettings
from website.addons.github.model import AddonGitHubNodeSettings
from website.addons.github.model import AddonGitHubOauthSettings
from website.addons.github.model import GithubGuidFile
from .utils import create_mock_github
mock_github = create_mock_github()
class TestFileGuid(OsfTestCase):
    """Tests for guid-file behavior of the github node addon."""

    def setUp(self):
        # NOTE(review): this calls super(OsfTestCase, ...) rather than
        # super(TestFileGuid, ...), which skips OsfTestCase.setUp entirely
        # -- confirm this is intentional.
        super(OsfTestCase, self).setUp()
        self.user = UserFactory()
        self.project = ProjectFactory(creator=self.user)

        self.project.add_addon('github', auth=Auth(self.user))
        self.node_addon = self.project.get_addon('github')

    def test_provider(self):
        assert_equal('github', GithubGuidFile().provider)

    def test_correct_path(self):
        guid, _ = self.node_addon.find_or_create_file_guid('perth')
        # waterbutler_path and path must agree for github files
        assert_equal(guid.waterbutler_path, 'perth')
        assert_equal(guid.waterbutler_path, guid.path)

    @mock.patch('website.addons.base.requests.get')
    def test_unique_identifier(self, mock_get):
        # enrich() fetches file metadata over HTTP; stub the response
        mock_response = mock.Mock(ok=True, status_code=200)
        mock_get.return_value = mock_response
        mock_response.json.return_value = {
            'data': {
                'name': 'Morty',
                'extra': {
                    'fileSha': 'Im a little tea pot'
                }
            }
        }
        guid, _ = self.node_addon.find_or_create_file_guid('perth')

        guid.enrich()
        # the file sha from 'extra' becomes the unique identifier
        assert_equal(guid.unique_identifier, 'Im a little tea pot')

    def test_exception_from_response(self):
        # a 'too_large' error code must map to TooBigToRenderError
        mock_response = mock.Mock()
        mock_response.json.return_value = {'errors': [{'code': 'too_large'}]}

        guid, _ = self.node_addon.find_or_create_file_guid('perth')

        with assert_raises(TooBigToRenderError):
            guid._exception_from_response(mock_response)

    def test_node_addon_get_or_create(self):
        guid, created = self.node_addon.find_or_create_file_guid('/4/2')
        assert_true(created)
        assert_equal(guid.waterbutler_path, '/4/2')

    def test_node_addon_get_or_create_finds(self):
        # second lookup for the same path must return the existing guid
        guid1, created1 = self.node_addon.find_or_create_file_guid('/foo/bar')
        guid2, created2 = self.node_addon.find_or_create_file_guid('/foo/bar')
        assert_true(created1)
        assert_false(created2)
        assert_equals(guid1, guid2)
class TestCallbacks(OsfTestCase):
    """Tests for github node-settings callbacks (before_make_public,
    before_page_load) under public/private OSF/github combinations."""

    def setUp(self):
        super(TestCallbacks, self).setUp()

        # project with a creator (authenticator) plus one extra contributor
        self.project = ProjectFactory.build()
        self.consolidated_auth = Auth(self.project.creator)
        self.non_authenticator = UserFactory()
        self.project.save()
        self.project.add_contributor(
            contributor=self.non_authenticator,
            auth=self.consolidated_auth,
        )
        self.project.add_addon('github', auth=self.consolidated_auth)
        self.project.creator.add_addon('github')
        self.node_settings = self.project.get_addon('github')
        self.user_settings = self.project.creator.get_addon('github')
        self.node_settings.user_settings = self.user_settings
        # point the addon at a fixed (fake) github repo
        self.node_settings.user = 'Queen'
        self.node_settings.repo = 'Sheer-Heart-Attack'
        self.node_settings.save()

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_make_public(self, mock_repo):
        # missing repo: callback returns None (no warning message)
        mock_repo.side_effect = NotFoundError

        result = self.node_settings.before_make_public(self.project)
        assert_is(result, None)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_public_gh_public(self, mock_repo):
        # visibility matches (both public): no warning expected
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json({'private': False})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_public_gh_private(self, mock_repo):
        # mismatch (public project, private repo): warning expected
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json({'private': True})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_private_gh_public(self, mock_repo):
        # mismatch (private project, public repo): warning expected
        mock_repo.return_value = Repository.from_json({'private': False})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_private_gh_private(self, mock_repo):
        # visibility matches (both private): no warning expected
        mock_repo.return_value = Repository.from_json({'private': True})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)

    def test_before_page_load_not_contributor(self):
        # non-contributors never get addon warnings
        message = self.node_settings.before_page_load(self.project, UserFactory())
        assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in("You can re-authenticate", message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in("You can re-authenticate", message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_private_authenticated(self, mock_set_privacy):
mock_set_privacy.return_value = {}
message = self.node_settings.after_set_privacy(
self.project, 'private',
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
True,
)
assert_true(message)
assert_in('made private', message.lower())
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_public_authenticated(self, mock_set_privacy):
mock_set_privacy.return_value = {}
message = self.node_settings.after_set_privacy(
self.project, 'public'
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
False,
)
assert_true(message)
assert_in('made public', message.lower())
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.repo')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_not_authenticated(self, mock_set_privacy, mock_repo):
mock_set_privacy.return_value = {'errors': ['it broke']}
mock_repo.return_value = {'private': True}
message = self.node_settings.after_set_privacy(
self.project, 'private',
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
True,
)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
assert_in('could not set privacy', message.lower())
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
def test_does_not_get_copied_to_registrations(self):
registration = self.project.register_node(
schema=None,
auth=Auth(user=self.project.creator),
template='Template1',
data='hodor'
)
assert_false(registration.has_addon('github'))
class TestAddonGithubUserSettings(OsfTestCase):
    """AddonGitHubUserSettings and its attached oauth settings record."""
    def setUp(self):
        OsfTestCase.setUp(self)
        self.user_settings = AddonGitHubUserSettings()
        self.oauth_settings = AddonGitHubOauthSettings()
        self.oauth_settings.github_user_id = 'testuser'
        self.oauth_settings.save()
        self.user_settings.oauth_settings = self.oauth_settings
        self.user_settings.save()
    def test_repr(self):
        """repr() includes the owning user's id."""
        self.user_settings.owner = UserFactory()
        assert_in(self.user_settings.owner._id, repr(self.user_settings))
        # Removed an unused `oauth_settings = GitHubOauthSettingsFactory()`
        # local that was never referenced by this test.
    def test_public_id_is_none_if_no_oauth_settings_attached(self):
        self.user_settings.oauth_settings = None
        self.user_settings.save()
        # Regression test for:
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1053
        assert_is_none(self.user_settings.public_id)
    def test_github_user_name(self):
        """github_user_name proxies through to the oauth settings."""
        self.oauth_settings.github_user_name = "test user name"
        self.oauth_settings.save()
        assert_equal(self.user_settings.github_user_name, "test user name")
    def test_oauth_access_token(self):
        """oauth_access_token proxies through to the oauth settings."""
        self.oauth_settings.oauth_access_token = "test access token"
        self.oauth_settings.save()
        assert_equal(self.user_settings.oauth_access_token, "test access token")
    def test_oauth_token_type(self):
        """oauth_token_type proxies through to the oauth settings."""
        self.oauth_settings.oauth_token_type = "test token type"
        self.oauth_settings.save()
        assert_equal(self.user_settings.oauth_token_type, "test token type")
    @mock.patch('website.addons.github.api.GitHub.revoke_token')
    def test_clear_auth(self, mock_revoke_token):
        """clear_auth wipes every credential field on the user settings."""
        mock_revoke_token.return_value = True
        self.user_settings.clear_auth(save=True)
        assert_false(self.user_settings.github_user_name)
        assert_false(self.user_settings.oauth_token_type)
        assert_false(self.user_settings.oauth_access_token)
        assert_false(self.user_settings.oauth_settings)
class TestAddonGithubNodeSettings(OsfTestCase):
    """AddonGitHubNodeSettings: completeness checks, hook deletion, JSON."""
    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = UserFactory()
        self.user.add_addon('github')
        self.user_settings = self.user.get_addon('github')
        self.oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
        self.oauth_settings.github_user_id = 'testuser'
        self.oauth_settings.save()
        self.user_settings.oauth_settings = self.oauth_settings
        self.user_settings.save()
        self.node_settings = AddonGitHubNodeSettings(
            owner=ProjectFactory(),
            user='chrisseto',
            repo='openpokemon',
            user_settings=self.user_settings,
        )
        self.node_settings.save()
    def test_complete_true(self):
        """Auth + user + repo => the addon is completely configured."""
        assert_true(self.node_settings.has_auth)
        assert_true(self.node_settings.complete)
    def test_complete_false(self):
        """Missing repo owner => incomplete, even with auth."""
        self.node_settings.user = None
        assert_true(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)
    def test_complete_repo_false(self):
        """Missing repo name => incomplete, even with auth."""
        self.node_settings.repo = None
        assert_true(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)
    def test_complete_auth_false(self):
        """No user settings => no auth and incomplete."""
        self.node_settings.user_settings = None
        assert_false(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)
    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook(self, mock_delete_hook):
        """delete_hook removes an existing webhook and reports success."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_true(res)
        mock_delete_hook.assert_called_with(*args)
    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_no_hook(self, mock_delete_hook):
        """Without a hook_id, delete_hook is a no-op."""
        res = self.node_settings.delete_hook()
        assert_false(res)
        assert_false(mock_delete_hook.called)
    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_not_found(self, mock_delete_hook):
        """A vanished webhook (404) is reported as failure, not an error."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = NotFoundError
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_false(res)
        mock_delete_hook.assert_called_with(*args)
    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_error(self, mock_delete_hook):
        """Generic GitHub errors during hook deletion are swallowed."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = GitHubError(mock.Mock())
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_false(res)
        mock_delete_hook.assert_called_with(*args)
    def test_to_json_noauthorizing_authed_user(self):
        """to_json works for a logged-in user who is not the authorizer."""
        user = UserFactory()
        user.add_addon('github')
        user_settings = user.get_addon('github')
        oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
        oauth_settings.github_user_id = 'testuser'
        oauth_settings.save()
        # BUG FIX: attach the freshly created oauth_settings (previously this
        # assigned self.oauth_settings, leaving the new record unused and the
        # second user sharing the authorizer's credentials).
        user_settings.oauth_settings = oauth_settings
        user_settings.save()
        self.node_settings.to_json(user)
| |
import re
import sys
from re import sub

# Strip anaconda-provided entries from the import path so the system
# site-packages (pybedtools, pyfaidx, dill) are used instead.
# BUG FIX: iterate over a copy -- removing elements from a list while
# iterating over it skips the element that follows each removal, so some
# anaconda paths could survive the purge.
for path in list(sys.path):
    if path and 'anaconda' in path:
        sys.path.remove(path)

import numpy as np
from pybedtools import *
from pyfaidx import Fasta
import subprocess, os, shutil
from collections import *
import time
import dill as pickle
#from multiprocessing import Pool
from difflib import SequenceMatcher
def softmask(a):
    """Placeholder for soft-mask handling; currently a no-op that returns None.

    NOTE(review): never fleshed out -- confirm whether callers rely on it.
    """
    return
def similar(a, b):
    """Return difflib's similarity ratio (0.0-1.0) between sequences a and b."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# help from online sources ^^^^, helps verify backtracking from MAFs to the original fastas
def parseConfigFindPath(stringFind,configFile):
    """Return the last whitespace-separated token of the first config line
    containing ``stringFind``; rewind the file either way.

    Returns None when no line matches.
    """
    for entry in configFile:
        if stringFind in entry:
            # Rewind so the same handle can be scanned again later.
            configFile.seek(0)
            return entry.split()[-1].strip('\n')
    configFile.seek(0)
def parseConfigFindList(stringFind,configFile):
    """Collect the config lines that follow the ``stringFind`` marker line,
    stopping at the first line containing 'Stop'.

    The marker line itself is not collected; the file is rewound before
    returning the stripped list of entries.
    """
    collecting = False
    items = []
    for entry in configFile:
        if not entry:
            continue
        if collecting:
            if 'Stop' in entry:
                configFile.seek(0)
                break  # end of the list section
            items.append(entry.strip('\n'))
        if stringFind in entry:
            collecting = True  # start collecting from the next line on
    configFile.seek(0)
    return items
# Load every analysis option from configCNSAnalysis.txt.  Python 2 script
# (print statements, time.clock); values come back as strings.
start = time.clock()
pickleSkip = 0
print 'Loading CNS configuration file...','time=',time.clock()-start
configFile = open('configCNSAnalysis.txt','r')
rootfolder = parseConfigFindPath('root_folder',configFile)
pathPython = parseConfigFindPath('pathPython',configFile)
# species names and IDs, FIXME please can add this info to a config file
masterListSpecies = parseConfigFindList('masterListSpecies',configFile)
checkValidity = parseConfigFindPath('checkValidity',configFile)
intragenus = parseConfigFindList('intragenus',configFile)
intergenus = parseConfigFindList('intergenus',configFile)
subgenome = parseConfigFindList('subgenome',configFile)
conservedFastaPath = parseConfigFindPath('conservedFastaPath',configFile)
pickleSkip = parseConfigFindPath('pickleSkip',configFile)
pickleName = parseConfigFindPath('pickleName',configFile)
fasta2phylip = parseConfigFindPath('fasta2phylip',configFile)
phyML = parseConfigFindPath('PhyML',configFile)
bootstrap = parseConfigFindPath('bootstrap',configFile)
treeFile = parseConfigFindPath('treeFile',configFile)
treeOut = parseConfigFindPath('treeOut',configFile)
ratioCopy = parseConfigFindPath('ratioCopy',configFile)
outputTreeImages = parseConfigFindPath('outputTreeImages',configFile)
configFile.close()
# Convert the '0'/'1' option strings into integer flags.  Any value other
# than '1' (including None from a missing key) is treated as disabled.
if phyML == '1':
    phyML = 1
else:
    phyML = 0
if outputTreeImages == '1':
    outputTreeImages = 1
else:
    outputTreeImages = 0
if ratioCopy == '1':
    ratioCopy = 1
else:
    ratioCopy = 0
if fasta2phylip == '1':
    fasta2phylip = 1
else:
    fasta2phylip = 0
if treeOut == '1':
    treeOut = 1
else:
    treeOut = 0
if pickleSkip == '1':
    pickleSkip = 1
else:
    pickleSkip = 0
# NOTE(review): checkValidity is only normalized for '0'; any other string
# (e.g. '1') stays a truthy string -- presumably intentional, but confirm.
if checkValidity == '0':
    checkValidity = 0
sys.path.append(pathPython) # add python path
class speciesClass(): # add information about species that stores name and protyome ID, genome .fa file Fasta object, conserved Bed Element files
    #and you can generate bed files for genes of species and CDS of species
    def __init__(self,speciesNumber,genomeFileList,gffFileList,speciesName,speciesShortName):
        """Bundle per-species data: ID, genome Fasta handle, GFF path, names.

        speciesNumber -- proteome/ID string used to match file names.
        genomeFileList / gffFileList -- candidate file names to search.
        """
        self.speciesNumber = speciesNumber
        # NOTE(review): if no genome file name contains speciesNumber,
        # self.genome is never set and later access raises AttributeError.
        for file in genomeFileList:
            if self.speciesNumber in file:
                self.genome = Fasta(file)
        # The GFF must also contain 'PAC' in its name; same caveat as above.
        for file in gffFileList:
            if self.speciesNumber in file and 'PAC' in file:
                self.gffFile = file
        self.speciesName = speciesName
        self.speciesShortName = speciesShortName
        # Per-species output BED of conserved elements.
        self.conservedElementsBed = '%s_ConservedElements.bed'%self.speciesName
        #self.conservedElementsBedFile = open(self.conservedElementsBed, 'w')
# Map 'Name_ID_Short' species keys to speciesClass instances.
speciesInfo = {}
conditionalDictionary = defaultdict(list)
# list all files in analysis directory
listALLFiles = str(subprocess.Popen('ls', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# FIXME ['ls', '%s' % '']
print 'Generating File List','time=',time.clock()-start
# generate list of MAF, GFF and .fa files
listMAFfiles = []
listGFFFiles = []
listGenomeFiles = []
for file in listALLFiles:
    if file.endswith('.maf'):
        listMAFfiles.append(file.strip('\n'))
    if file.endswith('.gff') or file.endswith('.gff3'):
        listGFFFiles.append(file.strip('\n'))
    if file.endswith('.fa') or file.endswith('.fasta'):
        listGenomeFiles.append(file.strip('\n'))
print 'Initializing instances of species class...','time=',time.clock()-start
# generate speciesClass objects with relevant info seen above, for each species on masterListSpecies
# Keys look like 'SpeciesName_ProteomeID_ShortName' -> split('_') unpacks them.
for species in masterListSpecies:
    speciesInfo[species] = speciesClass(species.split('_')[1], listGenomeFiles, listGFFFiles, species.split('_')[0], species.split('_')[2])
"""
def turnSixteen(x):
    if x == -1:
        return 16
    else:
        return x
"""
print 'Generating list of intragenus, intergenus species and complete species list','time=',time.clock()-start
# Translate short names from the config into full species names by looking
# them up on masterListSpecies; subgenome short names are tracked separately.
listIntraGenus = []
listInterGenus = []
listSubgenomes = []
for shortName in intragenus:
    listIntraGenus.append([species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][0])
    if shortName in subgenome:
        listSubgenomes.append(
            [species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][
                0])
for shortName in intergenus:
    listInterGenus.append([species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][0])
listIncludedSpecies = listIntraGenus+listInterGenus
def findBadCharPosition(strSeq):
    """for each MAF sequence, output maximum number of valid characters in a row, exclude duplicates/lowercase/N/softmask <- invalid
    only accept sequence in analysis if at least 15 valid characters in a row"""
    #minVal = np.min(np.vectorize(lambda y: turnSixteen(y))(np.vectorize(lambda x: strSeq.find(x))(np.array(['a','t','c','g','N']))))
    # No soft-masked (lowercase) or ambiguous (N) characters at all:
    # trivially valid; 16 is simply any value above the >=15 threshold.
    if not any(c in strSeq for c in 'acgtN'):
        return 16
    # Collapse every invalid character to 'N', trim flanking gap dashes,
    # then measure the longest run of the remaining valid characters.
    # Pure-Python max() replaces the previous np.vectorize/np.max round
    # trip -- identical result without building a temporary object array.
    masked = strSeq.replace('a','N').replace('c','N').replace('t','N').replace('g','N')
    return max(len(run) for run in masked.strip('-').split('N'))
# if checking whether original fasta sequences are valid, MAF info can backtrack to find bed information/fasta DNA sequence start stop info
if checkValidity:
    # Truncate the report file; the first open().close() is redundant with
    # the immediately following open('w') but kept as-is.
    open('CheckValidity.txt','w').close()
    checkValidFile = open('CheckValidity.txt','w')
segmentCount = 0
# original conditionals, two-copy ratios between species, NKH 111 is PvirN has 1 sequence, K has 1, Hallii has 1
# can design something similar for Bhybridum analysis... eg. BdDSBs1011 D and S are subgenomes
# check out each MAF
mafAnalysisStructure = defaultdict(list)
# NOTE(review): writeConservedBedFiles is initialized (all values None) but
# never populated in this part of the script.
writeConservedBedFiles = dict.fromkeys(masterListSpecies,)
# Per-species accumulators: weighted similarity sum and total bases seen.
validityCount = Counter()
cacOutCount = Counter()
for species in listIncludedSpecies:
    validityCount[species] = 0
# Walk every MAF alignment, and for each qualifying aligned sequence verify
# that its coordinates can be backtracked to the original genome Fasta: the
# gap-stripped MAF sequence should match the slice pulled from the genome.
for file in listMAFfiles:
    print file
    inputMAF = open(file,'r')
    # for each segment in MAF file
    for segment in inputMAF.read().split('\n\n'):
        # dont skip analyzing segment or writing lines
        skipSegment = 0
        speciesList = []
        outputInfo = []
        # Header segments (containing '#') are skipped wholesale.
        if '#' in segment:
            skipSegment = 1
        for line in segment.split('\n'):
            if line and skipSegment == 0:
                if line[0] == 's' and 'Anc' not in line.split()[1]: # if this is a sequence from an actual species
                    # if length of sequence is >= 20 and over 15 valid characters in row
                    if int(line.split()[3]) >= 20 and findBadCharPosition(line.split()[-1]) >= 15:
                        lineList = line.split()
                        # lineList2: [species, contig-descriptor]; descriptors may
                        # themselves contain '.', hence the re-join below.
                        lineList2 = lineList[1].split('.')
                        speciesName = lineList2[0]
                        if len(lineList2) > 3:
                            lineList2 = lineList2[0:1] + ['.'.join(lineList2[2:])]
                        # lineList3 tail carries the contig's start/end offsets.
                        lineList3 = lineList2[-1].split('_')
                        if lineList[4] == '-': # negative orientation of sequence, start and end coordinates found from right end of chromosome
                            startCoord,endCoord = int(lineList3[-1])-int(lineList[2])-int(lineList[3]),int(lineList3[-1])-int(lineList[2])
                        else: # positive orientation of sequence, start end coords found from left end chromosomes
                            startCoord, endCoord = int(lineList3[-2])+int(lineList[2]),int(lineList3[-2])+int(lineList[2])+int(lineList[3])
                        # using MAF header info, see if stripped MAF sequence is same as sequence grabbed from original Fasta to make sure backtracking works
                        for species in masterListSpecies:
                            if speciesName in species:
                                MAFseq = lineList[-1].replace('-','')
                                if lineList[4] == '-': # reverse complement find for negative orientation
                                    conservedSeq = str(speciesInfo[species].genome[lineList2[-1][:lineList2[-1].find(lineList3[-2])-1]][startCoord:endCoord].reverse.complement)
                                else:
                                    conservedSeq = str(speciesInfo[species].genome[lineList2[-1][:lineList2[-1].find(lineList3[-2])-1]][startCoord:endCoord])
                                # should have 100% validity
                                # Accumulate base count and a length-weighted
                                # similarity (softmask lowercase -> 'N' first).
                                cacOutCount[speciesName]+=len(MAFseq)
                                validityCount[speciesName]+=similar(MAFseq,sub("[a-z]",'N',conservedSeq))*100.*len(MAFseq)
# Write the per-species backtracking report: total bases and the average
# percent identity between MAF sequences and their genome slices.
with open('backtrackCheck.txt','w') as f:
    for speciesName in listIncludedSpecies:
        print speciesName
        #print validityCount[speciesName]
        #print cacOutCount[speciesName]
        f.write('%s\nCactus Output First Filtering = %dbps\nAverage backtrack percentage = %f Percent\n\n'%(speciesName,cacOutCount[speciesName],float(validityCount[speciesName])/float(cacOutCount[speciesName])))
| |
from unittest import TestCase
from iota import Address, Fragment, ProposedBundle, ProposedTransaction, Tag, \
TryteString
from iota.crypto.signing import KeyGenerator
from iota.crypto.types import Seed
from iota.transaction.types import BundleHash
class ProposedBundleTestCase(TestCase):
  def setUp(self):
    """Create a seed, six pre-funded input addresses, and an empty bundle."""
    super(ProposedBundleTestCase, self).setUp()
    # We will use a seed to generate addresses and private keys, to
    # ensure a realistic scenario (and because the alternative is to
    # inject mocks all over the place!).
    self.seed =\
      Seed(
        b'TESTVALUE9DONTUSEINPRODUCTION99999RLC9CS'
        b'ZUILGDTLJMRCJSDVEEJO9A9LHAEHMNAMVXRMOXTBN'
      )
    # To speed things up a little bit, though, we can pre-generate a
    # few addresses to use as inputs.
    # Naming convention: input_<key index>_bal_eq_<balance>[_sl_<level>];
    # security level is 1 unless the name says otherwise.
    self.input_0_bal_eq_42 =\
      Address(
        balance = 42,
        key_index = 0,
        security_level = 1,
        trytes =
          b'JBLDCCSI9VKU9ZHNZCUTC9NLQIIJX9SIKUJNKNKE'
          b'9KKMHXFMIXHLKQQAVTTNPRCZENGLIPALHKLNKTXCU',
      )
    self.input_1_bal_eq_40 =\
      Address(
        balance = 40,
        key_index = 1,
        security_level = 1,
        trytes =
          b'KHWHSTISMVVSDCOMHVFIFCTINWZT9EHJUATYSMCX'
          b'DSMZXPL9KXREBBYHJGRBCYVGPJQEHEDPXLBDJNQNX',
      )
    self.input_2_bal_eq_2 =\
      Address(
        balance = 2,
        key_index = 2,
        security_level = 1,
        trytes =
          b'GOAAMRU9EALPO9GKBOWUVZVQEJMB9CSGIZJATHRB'
          b'TRRJPNTSQRZTASRBTQCRFAIDOGTWSHIDGOUUULQIG',
      )
    self.input_3_bal_eq_100 =\
      Address(
        balance = 100,
        key_index = 3,
        security_level = 1,
        trytes =
          b'9LPQCSJGYUJMLWKMLJ9KYUYJ9RMDBZZWPHXMGKRG'
          b'YLOAZNKJR9VDYSONVAJRIPVWCOZKFMEKUSWHPSDDZ',
      )
    self.input_4_bal_eq_42_sl_2 =\
      Address(
        balance = 42,
        key_index = 4,
        security_level = 2,
        trytes =
          b'NVGLHFZWLEQAWBDJXCWJBMVBVNXEG9DALNBTAYMK'
          b'EMMJ9BCDVVHJJLSTQW9JEJXUUX9JNFGALBNASRDUD',
      )
    self.input_5_bal_eq_42_sl_3 =\
      Address(
        balance = 42,
        key_index = 5,
        security_level = 3,
        trytes =
          b'XXYRPQ9BDZGKZZQLYNSBDD9HZLI9OFRK9TZCTU9P'
          b'FAJYXZIZGO9BWLOCNGVMTLFQFMGJWYRMLXSCW9UTQ',
      )
    # Fresh, empty bundle for every test.
    self.bundle = ProposedBundle()
def test_add_transaction_short_message(self):
"""
Adding a transaction to a bundle, with a message short enough to
fit inside a single transaction.
"""
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999AETEXB'
b'D9YBTH9EMFKF9CAHJIAIKDBEPAMH99DEN9DAJETGN'
),
message = TryteString.from_unicode('Hello, IOTA!'),
value = 42,
))
# We can fit the message inside a single fragment, so only one
# transaction is necessary.
self.assertEqual(len(self.bundle), 1)
  def test_add_transaction_long_message(self):
    """
    Adding a transaction to a bundle, with a message so long that it
    has to be split into multiple transactions.
    """
    address = Address(
      b'TESTVALUE9DONTUSEINPRODUCTION99999N9GIUF'
      b'HCFIUGLBSCKELC9IYENFPHCEWHIDCHCGGEH9OFZBN'
    )
    tag = Tag.from_unicode('H2G2')
    # The message below exceeds a single signature fragment's capacity,
    # forcing the bundle to split it across transactions.
    self.bundle.add_transaction(ProposedTransaction(
      address = address,
      tag = tag,
      message = TryteString.from_unicode(
        '''
"Good morning," said Deep Thought at last.
"Er... Good morning, O Deep Thought," said Loonquawl nervously.
"Do you have... er, that is..."
"... an answer for you?" interrupted Deep Thought majestically. "Yes. I have."
The two men shivered with expectancy. Their waiting had not been in vain.
"There really is one?" breathed Phouchg.
"There really is one," confirmed Deep Thought.
"To Everything? To the great Question of Life, the Universe and Everything?"
"Yes."
Both of the men had been trained for this moment; their lives had been a
preparation for it; they had been selected at birth as those who would
witness the answer; but even so they found themselves gasping and squirming
like excited children.
"And you're ready to give it to us?" urged Loonquawl.
"I am."
"Now?"
"Now," said Deep Thought.
They both licked their dry lips.
"Though I don't think," added Deep Thought, "that you're going to like it."
"Doesn't matter," said Phouchg. "We must know it! Now!"
"Now?" enquired Deep Thought.
"Yes! Now!"
"All right," said the computer and settled into silence again.
The two men fidgeted. The tension was unbearable.
"You're really not going to like it," observed Deep Thought.
"Tell us!"
"All right," said Deep Thought. "The Answer to the Great Question..."
"Yes?"
"Of Life, the Universe and Everything..." said Deep Thought.
"Yes??"
"Is..."
"Yes?!"
"Forty-two," said Deep Thought, with infinite majesty and calm.
        '''
      ),

      # Now you know....
      # Eh, who am I kidding?  You probably knew before I did (:
      value = 42,
    ))

    # Because the message is too long to fit into a single fragment,
    # the transaction is split into two parts.
    self.assertEqual(len(self.bundle), 2)

    txn1 = self.bundle[0]
    self.assertEqual(txn1.address, address)
    self.assertEqual(txn1.tag, tag)
    self.assertEqual(txn1.value, 42)

    txn2 = self.bundle[1]
    self.assertEqual(txn2.address, address)
    self.assertEqual(txn2.tag, tag)

    # Supplementary transactions are assigned zero IOTA value.
    self.assertEqual(txn2.value, 0)
def test_add_transaction_error_already_finalized(self):
"""
Attempting to add a transaction to a bundle that is already
finalized.
"""
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION999999DCBIE'
b'U9AIE9H9BCKGMCVCUGYDKDLCAEOHOHZGW9KGS9VGH'
),
value = 0,
))
self.bundle.finalize()
with self.assertRaises(RuntimeError):
self.bundle.add_transaction(ProposedTransaction(
address = Address(b''),
value = 0,
))
def test_add_transaction_error_negative_value(self):
"""
Attempting to add a transaction with a negative value to a bundle.
Use :py:meth:`ProposedBundle.add_inputs` to add inputs to a bundle.
"""
with self.assertRaises(ValueError):
self.bundle.add_transaction(ProposedTransaction(
address = Address(b''),
value = -1,
))
def test_add_inputs_no_change(self):
"""
Adding inputs to cover the exact amount of the bundle spend.
"""
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999VELDTF'
b'QHDFTHIHFE9II9WFFDFHEATEI99GEDC9BAUH9EBGZ'
),
value = 29,
))
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999OGVEEF'
b'BCYAM9ZEAADBGBHH9BPBOHFEGCFAM9DESCCHODZ9Y'
),
value = 13,
))
self.bundle.add_inputs([
self.input_1_bal_eq_40,
self.input_2_bal_eq_2,
])
# Just to be tricky, add an unnecessary change address, just to
# make sure the bundle ignores it.
self.bundle.send_unspent_inputs_to(
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999FDCDFD'
b'VAF9NFLCSCSFFCLCW9KFL9TCAAO9IIHATCREAHGEA'
),
)
self.bundle.finalize()
# All of the addresses that we generate for this test case have
# security level set to 1, so we only need 1 transaction per
# input (4 total, including the spends).
#
# Also note: because the transaction is already balanced, no change
# transaction is necessary.
self.assertEqual(len(self.bundle), 4)
def test_add_inputs_with_change(self):
"""
Adding inputs to a bundle results in unspent inputs.
"""
tag = Tag(b'CHANGE9TXN')
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999VELDTF'
b'QHDFTHIHFE9II9WFFDFHEATEI99GEDC9BAUH9EBGZ'
),
value = 29,
))
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999OGVEEF'
b'BCYAM9ZEAADBGBHH9BPBOHFEGCFAM9DESCCHODZ9Y'
),
tag = tag,
value = 13,
))
self.bundle.add_inputs([self.input_3_bal_eq_100])
change_address =\
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999KAFGVC'
b'IBLHS9JBZCEFDELEGFDCZGIEGCPFEIQEYGA9UFPAE'
)
self.bundle.send_unspent_inputs_to(change_address)
self.bundle.finalize()
# 2 spends + 1 input (with security level 1) + 1 change
self.assertEqual(len(self.bundle), 4)
change_txn = self.bundle[-1]
self.assertEqual(change_txn.address, change_address)
self.assertEqual(change_txn.value, 58)
self.assertEqual(change_txn.tag, tag)
def test_add_inputs_security_level(self):
"""
Each input's security level determines the number of transactions
we will need in order to store the entire signature.
"""
self.bundle.add_transaction(
ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999XE9IVG'
b'EFNDOCQCMERGUATCIEGGOHPHGFIAQEZGNHQ9W99CH',
),
value = 84,
),
)
self.bundle.add_inputs([
self.input_4_bal_eq_42_sl_2,
self.input_5_bal_eq_42_sl_3,
])
self.bundle.finalize()
# Each input's security level determines how many transactions will
# be needed to hold all of its signature fragments:
# 1 spend + 2 fragments for input 0 + 3 fragments for input 1
self.assertEqual(len(self.bundle), 6)
def test_add_inputs_error_already_finalized(self):
"""
Attempting to add inputs to a bundle that is already finalized.
"""
# Add 1 transaction so that we can finalize the bundle.
self.bundle.add_transaction(
ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999XE9IVG'
b'EFNDOCQCMERGUATCIEGGOHPHGFIAQEZGNHQ9W99CH',
),
value = 0,
),
)
self.bundle.finalize()
with self.assertRaises(RuntimeError):
# Even though no inputs are provided, it's still an error; you
# shouldn't even be calling ``add_inputs`` once the bundle is
# finalized!
self.bundle.add_inputs([])
def test_send_unspent_inputs_to_error_already_finalized(self):
"""
Invoking ``send_unspent_inputs_to`` on a bundle that is already
finalized.
"""
# Add 1 transaction so that we can finalize the bundle.
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999XE9IVG'
b'EFNDOCQCMERGUATCIEGGOHPHGFIAQEZGNHQ9W99CH'
),
value = 0,
))
self.bundle.finalize()
with self.assertRaises(RuntimeError):
self.bundle.send_unspent_inputs_to(Address(b''))
def test_finalize_error_already_finalized(self):
"""
Attempting to finalize a bundle that is already finalized.
"""
# Add 1 transaction so that we can finalize the bundle.
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999XE9IVG'
b'EFNDOCQCMERGUATCIEGGOHPHGFIAQEZGNHQ9W99CH'
),
value = 0,
))
self.bundle.finalize()
with self.assertRaises(RuntimeError):
self.bundle.finalize()
def test_finalize_error_no_transactions(self):
"""
Attempting to finalize a bundle with no transactions.
"""
with self.assertRaises(ValueError):
self.bundle.finalize()
def test_finalize_error_negative_balance(self):
"""
Attempting to finalize a bundle with unspent inputs.
"""
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999IGEFUG'
b'LIHIJGJGZ9CGRENCRHF9XFEAWD9ILFWEJFKDLITCC'
),
value = 42,
))
self.bundle.add_inputs([self.input_0_bal_eq_42, self.input_2_bal_eq_2])
# Bundle spends 42 IOTAs, but inputs total 44 IOTAs.
self.assertEqual(self.bundle.balance, -2)
# In order to finalize this bundle, we need to specify a change
# address.
with self.assertRaises(ValueError):
self.bundle.finalize()
def test_finalize_error_positive_balance(self):
"""
Attempting to finalize a bundle with insufficient inputs.
"""
self.bundle.add_transaction(ProposedTransaction(
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999IGEFUG'
b'LIHIJGJGZ9CGRENCRHF9XFEAWD9ILFWEJFKDLITCC'
),
value = 42,
))
self.bundle.add_inputs([self.input_1_bal_eq_40])
# Bundle spends 42 IOTAs, but inputs total only 40 IOTAs.
self.assertEqual(self.bundle.balance, 2)
# In order to finalize this bundle, we need to provide additional
# inputs.
with self.assertRaises(ValueError):
self.bundle.finalize()
  def test_finalize_insecure_bundle(self):
    """
    When finalizing, the bundle detects an insecure bundle hash.

    References:
      - https://github.com/iotaledger/iota.py/issues/84
    """
    # The exact address/tag/timestamp values here are chosen because they
    # are known to produce an insecure hash on the first attempt.
    bundle =\
      ProposedBundle([
        ProposedTransaction(
          address =\
            Address(
              '9XV9RJGFJJZWITDPKSQXRTHCKJAIZZY9BYLBEQUX'
              'UNCLITRQDR9CCD99AANMXYEKD9GLJGVB9HIAGRIBQ',
            ),

          tag = Tag('PPDIDNQDJZGUQKOWJ9JZRCKOVGP'),
          timestamp = 1509136296,
          value = 0,
        ),
      ])

    bundle.finalize()

    # The resulting bundle hash is insecure (contains a [1, 1, 1]), so
    # the legacy tag is manipulated until a secure hash is generated.
    self.assertEqual(bundle[0].legacy_tag, Tag('ZTDIDNQDJZGUQKOWJ9JZRCKOVGP'))

    # The proper tag is left alone, however.
    self.assertEqual(bundle[0].tag, Tag('PPDIDNQDJZGUQKOWJ9JZRCKOVGP'))

    # The bundle hash takes the modified legacy tag into account.
    self.assertEqual(
      bundle.hash,

      BundleHash(
        'NYSJSEGCWESDAFLIFCNJFWGZ9PCYDOT9VCSALKBD'
        '9UUNKBJAJCB9KVMTHZDPRDDXC9UFJQBJBQFUPJKFC',
      )
    )
def test_sign_inputs(self):
    """
    Signing inputs in a finalized bundle, using a key generator.
    """
    spend_txn = ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    )
    self.bundle.add_transaction(spend_txn)
    self.bundle.add_inputs([self.input_1_bal_eq_40, self.input_2_bal_eq_2])
    self.bundle.finalize()
    self.bundle.sign_inputs(KeyGenerator(self.seed))

    # Sanity check: 1 spend + 2 security-level-1 inputs = 3 txns, and
    # signing must not add any transactions.
    # (Mixed security levels are covered by the next test.)
    self.assertEqual(len(self.bundle), 3)

    # The spend itself carries no signature.
    self.assertEqual(
        self.bundle[0].signature_message_fragment,
        Fragment(b''),
    )

    # Signature fragment *content* is covered by the signing unit tests;
    # here we only verify that each input transaction received one.
    #
    # References:
    #   - :py:class:`test.crypto.signing_test.SignatureFragmentGeneratorTestCase`
    for index, txn in enumerate(self.bundle):
        if index == 0:
            continue
        if txn.signature_message_fragment == Fragment(b''):
            self.fail(
                "Transaction {i}'s signature fragment is unexpectedly empty!".format(
                    i=index,
                ),
            )
def test_sign_inputs_security_level(self):
    """
    You may include inputs with different security levels in the same
    bundle.
    """
    spend_txn = ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999XE9IVG'
            b'EFNDOCQCMERGUATCIEGGOHPHGFIAQEZGNHQ9W99CH',
        ),
        value=84,
    )
    self.bundle.add_transaction(spend_txn)
    self.bundle.add_inputs([
        self.input_4_bal_eq_42_sl_2,
        self.input_5_bal_eq_42_sl_3,
    ])
    self.bundle.finalize()
    self.bundle.sign_inputs(KeyGenerator(self.seed))

    # Sanity check: 1 spend + (2 + 3) input fragments = 6 transactions.
    self.assertEqual(len(self.bundle), 6)

    # The spend itself carries no signature.
    self.assertEqual(
        self.bundle[0].signature_message_fragment,
        Fragment(b''),
    )

    # Signature fragment *content* is covered by the signing unit tests;
    # here we only verify that each input transaction received one.
    #
    # References:
    #   - :py:class:`test.crypto.signing_test.SignatureFragmentGeneratorTestCase`
    for index, txn in enumerate(self.bundle):
        if index == 0:
            continue
        if txn.signature_message_fragment == Fragment(b''):
            self.fail(
                "Transaction {i}'s signature fragment is unexpectedly empty!".format(
                    i=index,
                ),
            )
def test_sign_inputs_error_not_finalized(self):
    """
    Attempting to sign inputs in a bundle that hasn't been finalized
    yet.
    """
    # Build a bundle that *could* be finalized...
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])

    # ... but "forget" the finalize() call; signing must refuse.
    with self.assertRaises(RuntimeError):
        self.bundle.sign_inputs(KeyGenerator(b''))
def test_sign_input_at_single_fragment(self):
    """
    Signing an input at the specified index, only 1 fragment needed.
    """
    # Add a spend so that the bundle can be finalized.
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])
    self.bundle.finalize()

    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_0_bal_eq_42)
    self.bundle.sign_input_at(1, private_key)

    # Exactly 2 transactions: 1 spend + 1 input (security level = 1).
    self.assertEqual(len(self.bundle), 2)

    # The spend itself carries no signature.
    self.assertEqual(
        self.bundle[0].signature_message_fragment,
        Fragment(b''),
    )

    # Signature fragment *content* is covered by the signing unit tests;
    # here we only verify that each input transaction received one.
    #
    # References:
    #   - :py:class:`test.crypto.signing_test.SignatureFragmentGeneratorTestCase`
    for index, txn in enumerate(self.bundle):
        if index == 0:
            continue
        if txn.signature_message_fragment == Fragment(b''):
            self.fail(
                "Transaction {i}'s signature fragment is unexpectedly empty!".format(
                    i=index,
                ),
            )
def test_sign_input_at_multiple_fragments(self):
    """
    Signing an input at the specified index, multiple fragments needed.
    """
    # Add a spend so that the bundle can be finalized.
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_5_bal_eq_42_sl_3])
    self.bundle.finalize()

    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_5_bal_eq_42_sl_3)
    self.bundle.sign_input_at(1, private_key)

    # 1 spend + 3 input fragments (security level = 3).
    self.assertEqual(len(self.bundle), 4)

    # The spend itself carries no signature.
    self.assertEqual(
        self.bundle[0].signature_message_fragment,
        Fragment(b''),
    )

    # Signature fragment *content* is covered by the signing unit tests;
    # here we only verify that each input transaction received one.
    #
    # References:
    #   - :py:class:`test.crypto.signing_test.SignatureFragmentGeneratorTestCase`
    for index, txn in enumerate(self.bundle):
        if index == 0:
            continue
        if txn.signature_message_fragment == Fragment(b''):
            self.fail(
                "Transaction {i}'s signature fragment is unexpectedly empty!".format(
                    i=index,
                ),
            )
def test_sign_input_at_error_not_finalized(self):
    """
    Cannot sign inputs because the bundle isn't finalized yet.
    """
    # Build a bundle that *could* be finalized...
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])

    # ... but "forget" the finalize() call; signing must refuse.
    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_0_bal_eq_42)

    with self.assertRaises(RuntimeError):
        self.bundle.sign_input_at(1, private_key)
def test_sign_input_at_error_index_invalid(self):
    """
    The specified index doesn't exist in the bundle.
    """
    # Add a spend so that the bundle can be finalized.
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])
    self.bundle.finalize()

    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_0_bal_eq_42)

    # Only indexes 0 and 1 exist; 2 is out of range.
    with self.assertRaises(IndexError):
        self.bundle.sign_input_at(2, private_key)
def test_sign_input_at_error_index_not_input(self):
    """
    The specified index references a transaction that is not an input.
    """
    # Add a spend so that the bundle can be finalized.
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])
    self.bundle.finalize()

    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_0_bal_eq_42)

    with self.assertRaises(ValueError):
        # You can't sign the spend transaction, silly!
        self.bundle.sign_input_at(0, private_key)
def test_sign_input_at_error_already_signed(self):
    """
    Attempting to sign an input that is already signed.
    """
    # Add a spend so that the bundle can be finalized.
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=42,
    ))
    self.bundle.add_inputs([self.input_0_bal_eq_42])
    self.bundle.finalize()

    # Any non-empty fragment counts as "already signed"; it does not
    # need to be a valid signature.
    self.bundle[1].signature_message_fragment = Fragment(b'A')

    private_key = \
        KeyGenerator(self.seed).get_key_for(self.input_0_bal_eq_42)

    with self.assertRaises(ValueError):
        self.bundle.sign_input_at(1, private_key)
def test_create_tag_from_string(self):
    """
    Check if string value of tag is converted into a Tag object
    """
    txn = ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        tag="AAAZZZZ999",
        value=42,
    )

    # The plain string must have been coerced to a Tag instance.
    self.assertEqual(type(txn.tag), type(Tag(b'')))
def test_add_signature_or_message(self):
    """
    Add a fragment to a transaction.
    """
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        message=TryteString.from_unicode('This should be overwritten'),
        value=0,
    ))

    custom_msg = \
        'The early bird gets the worm, but the custom-msg gets into the bundle.'
    custom_fragment = Fragment.from_unicode(custom_msg)

    self.bundle.add_signature_or_message([custom_fragment])

    # Pre-finalization, the fragment lands in the message field...
    self.assertEqual(
        self.bundle._transactions[0].message,
        custom_fragment
    )

    # ... and finalization copies it into signature_message_fragment.
    self.bundle.finalize()
    self.assertEqual(
        self.bundle._transactions[0].signature_message_fragment,
        custom_fragment
    )

    # Round-trip: the text must come back out intact.
    self.assertEqual(
        self.bundle.get_messages()[0],
        custom_msg
    )
def test_add_signature_or_message_multiple(self):
    """
    Add multiple fragments.

    Note: method renamed from ``test_add_signature_or_messagee_multiple``
    to fix the "messagee" typo; unittest discovery still picks it up via
    the ``test_`` prefix.
    """
    # Add 3 transactions to the bundle.  For convenience, we use
    # 3 different addresses, so they are not grouped together and
    # bundle.get_messages() returns a list of messages mapping to
    # the 3 transactions.
    for i in ['A', 'B', 'C']:
        self.bundle.add_transaction(ProposedTransaction(
            address =
                Address(
                    'TESTVALUE' + i + 'DONTUSEINPRODUCTION99999QARFLF'
                    'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
                ),
            message = TryteString.from_unicode('This should be overwritten'),
            value = 0,
        ))

    fragment1 = Fragment.from_unicode('This is the first fragment.')
    fragment2 = Fragment.from_unicode('This is the second fragment.')

    self.bundle.add_signature_or_message([fragment1, fragment2])

    # Without an offset, the fragments overwrite the first two
    # transactions' messages; the third keeps its original message.
    bundle_fragments = []
    for tx in self.bundle:
        bundle_fragments.append(tx.message)

    self.assertListEqual(
        bundle_fragments,
        [fragment1, fragment2, TryteString.from_unicode('This should be overwritten')]
    )

    # Finalization copies messages into signature_message_fragment.
    self.bundle.finalize()

    bundle_fragments_unicode = []
    for tx in self.bundle:
        bundle_fragments_unicode.append(tx.signature_message_fragment.decode())

    self.assertListEqual(
        bundle_fragments_unicode,
        [fragment1.decode(), fragment2.decode(), 'This should be overwritten']
    )
def test_add_signature_or_message_multiple_offset(self):
    """
    Add multiple fragments with offset.
    """
    # Populate the bundle with 3 transactions.
    for suffix in ('A', 'B', 'C'):
        self.bundle.add_transaction(ProposedTransaction(
            address=Address(
                'TESTVALUE' + suffix + 'DONTUSEINPRODUCTION99999QARFLF'
                'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
            ),
            message=TryteString.from_unicode('This should be overwritten'),
            value=0,
        ))

    fragment1 = Fragment.from_unicode('This is the first fragment.')
    fragment2 = Fragment.from_unicode('This is the second fragment.')

    # Offset 1: skip the first transaction.
    self.bundle.add_signature_or_message([fragment1, fragment2], 1)

    bundle_fragments = [tx.message for tx in self.bundle]

    self.assertListEqual(
        bundle_fragments,
        [TryteString.from_unicode('This should be overwritten'), fragment1, fragment2]
    )

    # Finalization copies messages into signature_message_fragment.
    self.bundle.finalize()

    bundle_fragments_unicode = [
        tx.signature_message_fragment.decode() for tx in self.bundle
    ]

    self.assertListEqual(
        bundle_fragments_unicode,
        ['This should be overwritten', fragment1.decode(), fragment2.decode()]
    )
def test_add_signature_or_message_too_long_fragments(self):
    """
    Trying to add too many fragments to a bundle, when there aren't enough
    transactions to hold them.
    """
    # Populate the bundle with 3 transactions.
    for suffix in ('A', 'B', 'C'):
        self.bundle.add_transaction(ProposedTransaction(
            address=Address(
                'TESTVALUE' + suffix + 'DONTUSEINPRODUCTION99999QARFLF'
                'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
            ),
            message=TryteString.from_unicode('This should be overwritten'),
            value=0,
        ))

    fragment1 = Fragment.from_unicode('This is the first fragment.')

    # 4 fragments cannot fit into 3 transactions.
    with self.assertRaises(ValueError):
        self.bundle.add_signature_or_message([fragment1] * 4)

    # 3 fragments would fit -- but not when shifted by one.
    with self.assertRaises(ValueError):
        self.bundle.add_signature_or_message([fragment1] * 3, start_index=1)
def test_add_signature_or_message_invalid_start_index(self):
    """
    Attempting to add fragments to a bundle, but `start_index` is invalid.
    """
    # Populate the bundle with 3 transactions.
    for suffix in ('A', 'B', 'C'):
        self.bundle.add_transaction(ProposedTransaction(
            address=Address(
                'TESTVALUE' + suffix + 'DONTUSEINPRODUCTION99999QARFLF'
                'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
            ),
            message=TryteString.from_unicode('This should be overwritten'),
            value=0,
        ))

    fragment1 = Fragment.from_unicode('This is the first fragment.')

    # Negative index: rejected.
    with self.assertRaises(ValueError):
        self.bundle.add_signature_or_message([fragment1], start_index=-1)

    # Index past the last transaction: rejected.
    with self.assertRaises(ValueError):
        self.bundle.add_signature_or_message([fragment1], start_index=3)

    # Non-integer index: rejected with TypeError.
    with self.assertRaises(TypeError):
        self.bundle.add_signature_or_message([fragment1], 'not an int')
def test_add_signature_or_message_empty_list(self):
    """
    Try to add an empty list of fragments.
    """
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=0,
    ))

    # An empty fragment list is rejected outright.
    with self.assertRaises(ValueError):
        self.bundle.add_signature_or_message([])
def test_add_signature_or_message_wrong_types(self):
    """
    Try add signatures/messages with wrong type.
    """
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        value=0,
    ))

    # Not a list at all: rejected.
    with self.assertRaises(TypeError):
        self.bundle.add_signature_or_message('Not a list')

    # A list, but its elements aren't Fragments: rejected.
    with self.assertRaises(TypeError):
        self.bundle.add_signature_or_message(['List but not Fragment'])
def test_add_signature_or_message_finalized_bundle(self):
    """
    Try to call the method on a finalized bundle.
    """
    self.bundle.add_transaction(ProposedTransaction(
        address=Address(
            b'TESTVALUE9DONTUSEINPRODUCTION99999QARFLF'
            b'TDVATBVFTFCGEHLFJBMHPBOBOHFBSGAGWCM9PG9GX'
        ),
        message=TryteString.from_unicode('This should be overwritten'),
        value=0,
    ))

    custom_msg = \
        'The early bird gets the worm, but the custom-msg gets into the bundle.'
    custom_fragment = Fragment.from_unicode(custom_msg)

    # Once finalized, the bundle is immutable.
    self.bundle.finalize()

    with self.assertRaises(RuntimeError):
        self.bundle.add_signature_or_message([custom_fragment])
| |
from __future__ import unicode_literals
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
class DatabaseOperations(BaseDatabaseOperations):
    """
    SQLite-specific implementations of Django's database operations.

    Many date/time operations are delegated to user-defined SQL functions
    (``django_date_extract``, ``django_date_trunc``, etc.) that the SQLite
    backend registers on each connection, because SQLite lacks the native
    SQL constructs.
    """

    def bulk_batch_size(self, fields, objs):
        """
        SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
        999 variables per query.

        If there's only a single field to insert, the limit is 500
        (SQLITE_MAX_COMPOUND_SELECT).
        """
        limit = 999 if len(fields) > 1 else 500
        # Guard against division by zero when no fields are given.
        return (limit // len(fields)) if len(fields) > 0 else len(objs)

    def check_expression_support(self, expression):
        """
        Reject aggregates over date/time fields: SQLite stores them as text,
        so Sum/Avg/Variance/StdDev would produce nonsense.
        """
        bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
        bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
        if isinstance(expression, bad_aggregates):
            for expr in expression.get_source_expressions():
                try:
                    output_field = expr.output_field
                    if isinstance(output_field, bad_fields):
                        raise NotImplementedError(
                            'You cannot use Sum, Avg, StdDev, and Variance '
                            'aggregations on date/time fields in sqlite3 '
                            'since date/time is saved as text.'
                        )
                except FieldError:
                    # Not every subexpression has an output_field which is fine
                    # to ignore.
                    pass

    def date_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_date_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def date_interval_sql(self, timedelta):
        """Represent a timedelta as a quoted ISO-ish duration string literal."""
        return "'%s'" % duration_string(timedelta), []

    def format_for_duration_arithmetic(self, sql):
        """Do nothing here, we will handle it in the custom function."""
        return sql

    def date_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)

    def time_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name)

    def datetime_cast_date_sql(self, field_name, tzname):
        # Timezone name is passed as a query parameter, not interpolated.
        return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]

    def datetime_cast_time_sql(self, field_name, tzname):
        return "django_datetime_cast_time(%s, %%s)" % field_name, [tzname]

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_extract_sql.
        return "django_datetime_extract('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_trunc_sql.
        return "django_datetime_trunc('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def time_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_time_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def pk_default_value(self):
        # Inserting NULL into an INTEGER PRIMARY KEY auto-assigns the rowid.
        return "NULL"

    def _quote_params_for_last_executed_query(self, params):
        """
        Only for last_executed_query! Don't use this to execute SQL queries!
        """
        # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
        # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
        # number of return values, default = 2000). Since Python's sqlite3
        # module doesn't expose the get_limit() C API, assume the default
        # limits are in effect and split the work in batches if needed.
        BATCH_SIZE = 999
        if len(params) > BATCH_SIZE:
            results = ()
            for index in range(0, len(params), BATCH_SIZE):
                chunk = params[index:index + BATCH_SIZE]
                results += self._quote_params_for_last_executed_query(chunk)
            return results

        sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
        # Bypass Django's wrappers and use the underlying sqlite3 connection
        # to avoid logging this query - it would trigger infinite recursion.
        cursor = self.connection.connection.cursor()
        # Native sqlite3 cursors cannot be used as context managers.
        try:
            return cursor.execute(sql, params).fetchone()
        finally:
            cursor.close()

    def last_executed_query(self, cursor, sql, params):
        # Python substitutes parameters in Modules/_sqlite/cursor.c with:
        # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
        # Unfortunately there is no way to reach self->statement from Python,
        # so we quote and substitute parameters manually.
        if params:
            if isinstance(params, (list, tuple)):
                params = self._quote_params_for_last_executed_query(params)
            else:
                keys = params.keys()
                values = tuple(params.values())
                values = self._quote_params_for_last_executed_query(values)
                params = dict(zip(keys, values))
            return sql % params
        # For consistency with SQLiteCursorWrapper.execute(), just return sql
        # when there are no parameters. See #13648 and #17158.
        else:
            return sql

    def quote_name(self, name):
        """Quote an identifier with double quotes, unless already quoted."""
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def no_limit_value(self):
        # SQLite uses LIMIT -1 to mean "no limit".
        return -1

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        # NB: The generated SQL below is specific to SQLite
        # Note: The DELETE FROM... SQL generated below works for SQLite databases
        # because constraints don't exist
        sql = ['%s %s %s;' % (
            style.SQL_KEYWORD('DELETE'),
            style.SQL_KEYWORD('FROM'),
            style.SQL_FIELD(self.quote_name(table))
        ) for table in tables]
        # Note: No requirement for reset of auto-incremented indices (cf. other
        # sql_flush() implementations). Just return SQL at this point
        return sql

    def adapt_datetimefield_value(self, value):
        """Convert a datetime to the text form SQLite stores."""
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")

        return six.text_type(value)

    def adapt_timefield_value(self, value):
        """Convert a time to the text form SQLite stores."""
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            raise ValueError("SQLite backend does not support timezone-aware times.")

        return six.text_type(value)

    def get_db_converters(self, expression):
        """Append SQLite-specific value converters for the expression's type."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_datefield_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_timefield_value)
        elif internal_type == 'DecimalField':
            converters.append(self.convert_decimalfield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        elif internal_type in ('NullBooleanField', 'BooleanField'):
            converters.append(self.convert_booleanfield_value)
        return converters

    def convert_datetimefield_value(self, value, expression, connection, context):
        """Parse stored text back into a (possibly tz-aware) datetime."""
        if value is not None:
            if not isinstance(value, datetime.datetime):
                value = parse_datetime(value)
            if settings.USE_TZ and not timezone.is_aware(value):
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection, context):
        """Parse stored text back into a date."""
        if value is not None:
            if not isinstance(value, datetime.date):
                value = parse_date(value)
        return value

    def convert_timefield_value(self, value, expression, connection, context):
        """Parse stored text back into a time."""
        if value is not None:
            if not isinstance(value, datetime.time):
                value = parse_time(value)
        return value

    def convert_decimalfield_value(self, value, expression, connection, context):
        """Re-quantize the float SQLite returns into a Decimal."""
        if value is not None:
            value = expression.output_field.format_number(value)
            value = backend_utils.typecast_decimal(value)
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        """Convert the stored hex text into a uuid.UUID."""
        if value is not None:
            value = uuid.UUID(value)
        return value

    def convert_booleanfield_value(self, value, expression, connection, context):
        # Only map the canonical 0/1 ints; leave anything else untouched.
        return bool(value) if value in (1, 0) else value

    def bulk_insert_sql(self, fields, placeholder_rows):
        # SQLite (pre-3.7.11) lacks multi-row VALUES; emulate with UNION ALL.
        return " UNION ALL ".join(
            "SELECT %s" % ", ".join(row)
            for row in placeholder_rows
        )

    def combine_expression(self, connector, sub_expressions):
        # SQLite doesn't have a power function, so we fake it with a
        # user-defined function django_power that's registered in connect().
        if connector == '^':
            return 'django_power(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def combine_duration_expression(self, connector, sub_expressions):
        """Combine duration operands via the django_format_dtdelta UDF."""
        if connector not in ['+', '-']:
            raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
        fn_params = ["'%s'" % connector] + sub_expressions
        if len(fn_params) > 3:
            raise ValueError('Too many params for timedelta operations.')
        return "django_format_dtdelta(%s)" % ', '.join(fn_params)

    def integer_field_range(self, internal_type):
        # SQLite doesn't enforce any integer constraints
        return (None, None)

    def subtract_temporals(self, internal_type, lhs, rhs):
        """Emit SQL subtracting two temporal values via the backend's UDFs."""
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        if internal_type == 'TimeField':
            return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
        return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
| |
import traceback
from binascii import hexlify
#from cancat.j1939 import *
# we can move things into here if we decide this replaces the existing j1939 modules
import cancat
import struct
from cancat.J1939db import *
from cancat import *
from cancat.vstruct.bitfield import *
import queue
import threading
'''
This is a J1939 Stack module.
Its purpose is to provide a J1939-capable object which extends (consumes) CanCat's CanInterface module and provides a J1939 interface.
In the original J1939 module, extended messages were treated like oddities. This module will work on the premise that all messages (except TP messages) are created equal, and are available in the same queue. TP messages will be the "specialty" messages which create new, arbitrary sized messages from smaller ones. If they don't make it through, they don't. This module is intended to make J1939 attack clients easier to work with, whereas the first module was focused on reverse engineering. Let's see if we can't come up with something awesome, then possibly merge them together in the future.
This module focuses around PGNs. All messages are handled and sorted by their PS/DA.
'''
# Message-source index under which J1939 messages are stored in CanCat.
J1939MSGS = 1939

# PDU Format (PF) values with protocol-defined meanings.
PF_RQST = 0xea        # Request
PF_TP_DT = 0xeb       # Transport Protocol - Data Transfer
PF_TP_CM = 0xec       # Transport Protocol - Connection Management
PF_ADDRCLAIM = 0xee   # Address Claim
PF_PROPRIETRY= 0xef   # Proprietary A  (note: name typo preserved for compat)
PF_KWP1 = 0xdb        # KWP2000-over-J1939 channels
PF_KWP2 = 0xda
PF_KWP3 = 0xce
PF_KWP4 = 0xcd

# TP.CM (Connection Management) control bytes.
CM_RTS = 0x10         # Request To Send
CM_CTS = 0x11         # Clear To Send
CM_EOM = 0x13         # End Of Message acknowledgment
CM_ABORT = 0xff
CM_BAM = 0x20         # Broadcast Announce Message

# Internal markers for the two transport-session flavors.
TP_BAM = 20
TP_DIRECT = 10
TP_DIRECT_BROKEN=9    # direct session that failed/was aborted
class NAME(VBitField):
    """
    J1939 64-bit NAME field, parsed MSB-first.

    NOTE: the declaration order of these v_bits fields defines the bit
    layout vstruct parses; do not reorder them.
    """

    def __init__(self):
        VBitField.__init__(self)
        self.arbaddrcap = v_bits(1)                 # arbitrary address capable
        self.ind_group = v_bits(3)                  # industry group
        self.vehicle_system_instance = v_bits(4)
        self.vehicle_system = v_bits(7)
        self.reserved = v_bits(1)
        self.function = v_bits(8)
        self.function_instance = v_bits(5)
        self.ecu_instance = v_bits(3)
        self.mfg_code = v_bits(11)                  # manufacturer code
        self.identity_number = v_bits(21)

    def minrepr(self):
        """Short human-readable summary: identity number + manufacturer."""
        # mfg_lookup presumably comes from the J1939db star-import above.
        mfgname = mfg_lookup.get(self.mfg_code)
        return "id: 0x%x mfg: %s" % (self.identity_number, mfgname)
def parseName(name):
    '''
    Parse an 8-byte J1939 NAME payload into a NAME bitfield object.
    '''
    parsed = NAME()
    # Parse against the byte-reversed buffer to match the on-wire ordering.
    parsed.vsParse(name[::-1])
    return parsed
def parseArbid(arbid):
    '''
    Split a 29-bit J1939 arbitration ID into its component fields.

    Returns a tuple:
        (priority, EDP, DP, PDU Format, PDU Specific, Source Address)
    '''
    raw = struct.pack(b">I", arbid)
    prio_byte, pf, ps, sa = struct.unpack(b'BBBB', raw)

    # Top byte packs priority (3 bits), EDP and DP flags.
    return (prio_byte >> 2), ((prio_byte >> 1) & 1), (prio_byte & 1), pf, ps, sa
def meldExtMsgs(msgs):
    '''
    Reassemble a multipart (TP) message from its collected chunks.

    `msgs` is a dict with 'msgs' (list of (arbtup, frame-data) pairs) and
    'totsize' (declared total length); the first byte of each frame is the
    sequence number and is stripped.
    '''
    total = msgs.get('totsize')
    payload = b''.join(chunk[1:] for _, chunk in msgs.get('msgs'))
    # Trim the final frame's padding down to the declared size.
    return payload[:total]
### renderers for specific PF numbers
def pf_c9(idx, ts, arbtup, data, j1939):
    '''
    Render a Request2 (PF 0xC9) message for display.

    Bytes 0-2 hold the requested PGN; the low 2 bits of byte 3 select the
    transfer mode.

    Fixes two bugs in the original:
      * the "%.2x %.2x %.2x" format was fed a *list* (TypeError), and used
        ord() on ints under Python 3 -- now a tuple of the raw bytes;
      * the mode string was assigned to `usexferpfn` but referenced as
        `usexferpgn` (NameError).
    '''
    b4 = data[3]
    req = "%.2x %.2x %.2x" % tuple(data[:3])
    usexferpgn = ('', 'Use_Transfer_PGN', 'undef', 'NA')[b4 & 3]
    return "Request2: %s %s" % (req, usexferpgn)
def pf_ea(idx, ts, arbtup, data, j1939):
    '''Render a Request (PF 0xEA) message: the payload's first 3 bytes are the requested PGN.'''
    (prio, edp, dp, pf, ps, sa) = arbtup
    requested_pgn = data[:3]
    return "Request: %s" % hexlify(requested_pgn)
# no pf_eb or pf_ec since those are handled at a lower-level in this stack
def pf_ee(idx, ts, arbtup, data, j1939):
    '''Render an Address Claim (PF 0xEE) message.'''
    prio, edp, dp, pf, ps, sa = arbtup

    # DA 255 with SA 254 is the special "cannot claim address" combination.
    if (ps, sa) == (255, 254):
        return 'CANNOT CLAIM ADDRESS'

    return "Address Claim: %s" % parseName(data).minrepr()
def pf_ef(idx, ts, arbtup, data, j1939):
    '''Render a Proprietary A message (PF 0xEF); the DP bit selects the A2 page.'''
    prio, edp, dp, pf, ps, sa = arbtup
    return 'Proprietary A2' if dp else 'Proprietary A1'
def pf_ff(idx, ts, arbtup, data, j1939):
    '''Render a Proprietary B message (PF 0xFF) with source, PGN bytes and payload.'''
    prio, edp, dp, pf, ps, sa = arbtup
    detail = "%.2x :: %.2x:%.2x - %s" % (sa, pf, ps, hexlify(data))
    return "Proprietary B %s" % detail
pgn_pfs = {
0x93: ("Name Management", None),
0xc9: ("Request2", pf_c9),
0xca: ('Transfer', None),
0xe8: ("ACK ", None),
0xea: ("Request ", pf_ea),
0xeb: ("TP.DT (WTF?)", None),
0xec: ("TP.CM (WTF?)", None),
0xee: ("Address Claim", pf_ee),
0xef: ("Proprietary", pf_ef),
#0xfe: ("Command Address", None),
0xff: ("Proprietary B", pf_ff),
}
class J1939Interface(cancat.CanInterface):
_msg_source_idx = J1939MSGS
def __init__(self, port=None, baud=cancat.baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, process_can_msgs=True, promisc=True):
    '''
    Create a J1939 interface layered on CanCat's CanInterface.

    All J1939 bookkeeping state is initialized *before* calling the base
    constructor, because the base class may start delivering messages to
    our registered handler immediately.

    Fix: uses the ``daemon`` attribute instead of ``Thread.setDaemon()``,
    which is deprecated (and slated for removal) since Python 3.10.
    '''
    self._last_recv_idx = -1
    self._threads = []
    self._j1939_filters = []
    self._j1939_msg_events = {}
    self._j1939queuelock = threading.Lock()

    self._TPmsgParts = {}
    self.maxMsgsPerPGN = 0x200

    self._j1939_msg_listeners = []
    self.promisc = promisc

    cancat.CanInterface.__init__(self, port=port, baud=baud, verbose=verbose, cmdhandlers=cmdhandlers, comment=comment, load_filename=load_filename, orig_iface=orig_iface)

    # setup the message handler event offload thread
    if self._config.get('myIDs') is None:
        self._config['myIDs'] = []

    self._mhe_queue = queue.Queue()
    mhethread = threading.Thread(target=self._mhe_runner)
    mhethread.daemon = True  # setDaemon() is deprecated since Python 3.10
    mhethread.start()
    self._threads.append(mhethread)

    self.register_handler(CMD_CAN_RECV, self._j1939_can_handler)
    if process_can_msgs:
        self.processCanMessages()

    # restore other config items
def processCanMessages(self, delete=True):
    '''
    Replay every captured raw CAN message through the J1939 handler.

    NOTE(review): the ``delete`` parameter is currently ignored --
    recvall() consumes the messages unconditionally; confirm intent.
    '''
    for msg in self.recvall(CMD_CAN_RECV):
        self._j1939_can_handler(msg, None)
def setPromiscuous(self, promisc=True):
    '''
    Control whether messages addressed to IDs we do not own
    (self._config['myIDs']) are kept and handled, or discarded.
    '''
    self.promisc = promisc
def addID(self, newid):
    '''Register an address as one of our own (idempotent).'''
    myids = self._config['myIDs']
    if newid not in myids:
        myids.append(newid)
def delID(self, curid):
    '''Forget a previously registered address (no-op if absent).'''
    myids = self._config['myIDs']
    if curid in myids:
        myids.remove(curid)
def J1939xmit(self, pf, ps, sa, data, prio=6, edp=0, dp=0):
    '''
    Transmit a J1939 message, transparently choosing single-frame or
    Transport Protocol based on payload length.

    NOTE(review): payloads of exactly 8 bytes are sent via TP even though
    a single CAN frame can carry 8 bytes -- confirm whether `< 8` should
    be `<= 8`.
    '''
    if len(data) < 8:
        arbid = emitArbid(prio, edp, dp, pf, ps, sa)
        # print("TX: %x : %r" % (arbid, hexlify(data)))
        self.CANxmit(arbid, data, extflag=1)
        return

    # Too large for one frame: hand off to the transport-protocol sender.
    self._j1939xmit_tp(pf, ps, sa, data, prio, edp, dp)
def _j1939xmit_tp(self, pf, ps, sa, message, prio=6, edp=0, dp=0):
    '''
    Transmit a message via J1939 Transport Protocol: one TP.CM RTS frame
    announcing size/frame-count/PGN, followed by numbered TP.DT frames of
    up to 7 payload bytes each.

    NOTE(review): the '%c' + slice concatenation assumes `message` is a
    str (Python-2 style); with a bytes payload this raises TypeError --
    confirm the expected input type.
    '''
    pgn2 = (edp << 1) | dp
    pgn1 = pf
    if pgn1 < 240:
        # PDU1: destination-specific, PS byte is part of the PGN.
        pgn0 = 0
    else:
        pgn0 = ps

    # Split into 7-byte chunks, each prefixed with a 1-based sequence number.
    msgs = ['%c'%(x+1) + message[x*7:(x*7)+7] for x in range((len(message)+6)//7)]
    if len(msgs) > 255:
        raise Exception("J1939xmit_tp: attempt to send message that's too large")

    # TP.CM RTS: control byte, total size, frame count, max-frames (0xff), PGN.
    cm_msg = struct.pack('<BHBBBBB', CM_RTS, len(message), len(msgs), 0xff,
            pgn2, pgn1, pgn0)

    arbid = emitArbid(prio, edp, dp, PF_TP_CM, ps, sa)
    # print("TXe: %x : %r" % (arbid, hexlify(cm_msg)))
    self.CANxmit(arbid, cm_msg, extflag=1)
    time.sleep(.01)  # hack: should watch for CM_CTS

    for msg in msgs:
        #self.J1939xmit(PF_TP_DT, ps, sa, msg, prio=prio)
        arbid = emitArbid(prio, edp, dp, PF_TP_DT, ps, sa)
        print("TXe: %x : %r" % (arbid, hexlify(msg)))
        self.CANxmit(arbid, msg, extflag=1)

    # hack: should watch for CM_EOM
def _reprCanMsg(self, idx, ts, arbtup, data, comment=None):
    '''
    Render one J1939 message as a display line.

    Name resolution priority: PF-specific handler (pgn_pfs), then the
    manual name in pgn_pfs, then the J1939 PGN database.
    '''
    #print("_reprCanMsg: %r %r" % (args, kwargs))
    if comment is None:
        comment = ''

    prio, edp, dp, pf, ps, sa = arbtup

    # give name priority to the Handler, then the manual name (this module), then J1939PGNdb
    pfmeaning, handler = pgn_pfs.get(pf, ('', None))

    nextline = ''

    if handler is not None:
        enhanced = handler(idx, ts, arbtup, data, self)
        if enhanced == cancat.DONT_PRINT_THIS_MESSAGE:
            return enhanced

        if enhanced is not None:
            # Handlers may return a single string, or a sequence whose
            # first element replaces the name and whose rest become
            # extra output lines.
            if type(enhanced) in (list, tuple) and len(enhanced):
                pfmeaning = enhanced[0]
                if len(enhanced) > 1:
                    nextline = '\n'.join(list(enhanced[1:]))
                    # if we get multiple lines and the first is DONT_PRINT_THIS_MESSAGE,
                    # then just return nextline
                    if pfmeaning == cancat.DONT_PRINT_THIS_MESSAGE:
                        return nextline
                    nextline = '\n' + nextline
            else:
                pfmeaning = enhanced

    elif not len(pfmeaning):
        # No handler and no manual name: fall back to the PGN database,
        # optionally listing decoded SPNs in verbose mode.
        pgndata = parsePGNData(pf, ps, data)
        pfmeaning = pgndata.get('pgndata').get('Name')
        lines = []
        if self.verbose:
            for spnum, spdict, spunit, spdata, sprepr in pgndata.get('spns'):
                spnName = spdict.get('Name')
                lines.append('   SPN(%d): %-20s \t %s' % (spnum, sprepr, spnName))

        if len(lines):
            nextline = '\n' + '\n'.join(lines)

    return "%.8d %8.3f pri/edp/dp: %d/%d/%d, PG: %.2x %.2x  Source: %.2x  Data: %-18s  %s\t\t%s%s" % \
            (idx, ts, prio, edp, dp, pf, ps, sa, hexlify(data), pfmeaning, comment, nextline)
    def _j1939_can_handler(self, tsmsg, none):
        '''
        this function is run for *Every* received CAN message... and is executed from the
        XMIT/RECV thread. it *must* be fast!

        It therefore does nothing but split/parse the frame, apply the
        ownership/promiscuous filter, and enqueue the appropriate handler
        (TP data / TP control / direct submit) onto the MHE worker queue.
        '''
        #print(repr(self), repr(cmd), repr(tsmsg))
        ts, message = tsmsg
        arbid, data = self._splitCanMsg(message)
        arbtup = parseArbid(arbid)
        prio, edp, dp, pf, ps, sa = arbtup
        # if i don't care about this message... bail. (0xef+ is multicast)
        if pf < 0xef and ps not in self._config['myIDs'] and not self.promisc:
            return
        if pf == 0xeb:
            # TP.DT (transport data) -> reassembly
            self.queueMessageHandlerEvent(self.eb_handler, arbtup, data, ts)
        elif pf == 0xec:
            # TP.CM (transport connection management)
            self.queueMessageHandlerEvent(self.ec_handler, arbtup, data, ts)
        else:
            self.queueMessageHandlerEvent(self._submitJ1939Message, arbtup, data, ts)
        #print("submitted message: %r" % (hexlify(message)))
def queueMessageHandlerEvent(self, pfhandler, arbtup, data, ts):
'''
this is run in the XMIT/RECV thread and is intended to handle offloading the data fast
'''
self._mhe_queue.put((pfhandler, arbtup, data, ts))
def _mhe_runner(self):
'''
runs the mhe thread, which is offloaded so that the message-handling thread can keep going
'''
while not self._config.get('shutdown'):
while self._config['go']:
worktup = None
try:
worktup = self._mhe_queue.get(1)
if worktup is None:
continue
pfhandler, arbtup, data, ts = worktup
pfhandler(arbtup, data, ts)
except Exception as e:
print("(j1939stack)MsgHandler ERROR: %r (%r)" % (e, worktup))
if self.verbose:
sys.excepthook(*sys.exc_info())
time.sleep(1)
    def _submitJ1939Message(self, arbtup, message, timestamp=None):
        '''
        Submit a (reassembled) J1939 message to the J1939MSGS mailbox,
        creating the mailbox/event on first use.
        *threadsafe* (serialized by self._j1939queuelock)
        often runs in the MHE thread
        '''
        #print("_submitJ1939Message")
        if timestamp is None:
            timestamp = time.time()
        prio, edp, dp, pf, ps, sa = arbtup
        pgn = (pf<<8) | ps
        datarange = (edp<<1) | dp
        if len(self._j1939_filters):
            # SECURITY NOTE: filters are arbitrary python expressions eval()'d
            # against this frame's locals -- only trusted filter strings belong here
            lcls = locals()
            for advf in self._j1939_filters:
                try:
                    if not eval(advf, lcls):
                        return
                except Exception as e:
                    print("_submitJ1939Message advfilter ERROR: %r" % e)
                    return
        self._j1939queuelock.acquire()
        try:
            # do we want to break things apart into PGN mboxes at this point? if so, we have to also allow
            # subscription at this level for things like sniffing. Like this:
            handled = False
            for listener in self._j1939_msg_listeners:
                try:
                    contnu = listener(arbtup, message)
                    if contnu:
                        handled = True
                except Exception as e:
                    self.log('_submitJ1939Message: ERROR: %r' % e)
            # check for any J1939 registered handlers (using the default system handlers):
            cmdhandler = self._cmdhandlers.get(J1939MSGS)
            if cmdhandler is not None:
                # NOTE(review): 'tsmsg' is not defined in this scope -- this line
                # raises NameError whenever a cmdhandler is registered; probably
                # intended (timestamp, message).  Also 'handled2' is unbound on
                # the next line when cmdhandler is None and handled is True.
                handled2 = cmdhandler(tsmsg, self)
            if handled and handled2:
                #print("handled")
                return
            ##::: TODO, make this a listener. if at all...
            #dr = self._messages.get(datarange)
            #if dr is None:
            #dr = {}
            #self._messages[datarange] = dr
            #
            ## factor in multicast vs. unicast...
            #mbox = dr.get(pf)
            #if mbox is None:
            #mbox = []
            #dr[pf] = mbox
            #self._j1939_msg_events[pf] = threading.Event()
            # file in the mailbox
            mbox = self._messages.get(J1939MSGS)
            if mbox is None:
                mbox = []
                self._messages[J1939MSGS] = mbox
            msgevt = self._j1939_msg_events.get(J1939MSGS)
            if msgevt is None:
                msgevt = threading.Event()
                self._j1939_msg_events[J1939MSGS] = msgevt
            #mbox.append((pf, ps, sa, edp, dp, prio, timestamp, message))
            mbox.append((timestamp, arbtup, message))
            # wake anyone blocked waiting for new messages (e.g. genCanMsgs tail)
            msgevt.set()
            ##self._j1939_msg_events[pf].set()
            # note: this event will trigger for any of the data ranges, as long as the PF is correct... this may be a problem.
            # FIXME: come back to this...
        except Exception as e:
            self.log("_submitMessage: ERROR: %r" % e, -1)
            if self.verbose:
                sys.excepthook(*sys.exc_info())
        finally:
            self._j1939queuelock.release()
def getJ1939MsgCount(self):
j1939Msgs = self._messages.get(J1939MSGS)
if j1939Msgs is None:
return 0
return len(j1939Msgs)
def subscribe(self, msg_handler):
if msg_handler not in self._j1939_msg_listeners:
self._j1939_msg_listeners.append(msg_handler)
def unsubscribe(self, msg_handler):
if msg_handler in self._j1939_msg_listeners:
self._j1939_msg_listeners.remove(msg_handler)
    def ec_handler(j1939, arbtup, data, ts):
        '''
        special handler for TP_CM (transport Connection Management, PF 0xEC)
        messages: dispatches on the control byte to the per-command handlers
        defined below (RTS/CTS/EOM/BAM).
        pgn2 is PS/DA
        pgn1 is PF
        pgn0 is prio/edp/dp
        Note: the inner handlers close over *ts* from this scope.
        '''
        # RTS: peer wants to start a directed (point-to-point) TP transfer
        def tp_cm_10(arbtup, data, j1939): # RTS
            (prio, edp, dp, pf, da, sa) = arbtup
            (cb, totsize, pktct, maxct,
            pgn2, pgn1, pgn0) = struct.unpack('<BHBBBBB', data)
            # check for old stuff
            extmsgs = j1939.getTPmsgParts(da, sa)
            if extmsgs is not None and len(extmsgs['msgs']):
                # a previous transfer never completed: stash it as "broken"
                pgn2 = extmsgs['pgn2']
                pgn1 = extmsgs['pgn1']
                pgn0 = extmsgs['pgn0']
                j1939.saveTPmsg(da, sa, (pgn2, pgn1, pgn0), meldExtMsgs(extmsgs), TP_DIRECT_BROKEN)
                j1939.clearTPmsgParts(da, sa)
            # store extended message information for other stuff...
            extmsgs = j1939.getTPmsgParts(da, sa, create=True)
            extmsgs['ts'] = ts
            extmsgs['sa'] = sa
            extmsgs['da'] = da
            extmsgs['pgn2'] = pgn2
            extmsgs['pgn1'] = pgn1
            extmsgs['pgn0'] = pgn0
            extmsgs['maxct'] = maxct
            extmsgs['length'] = pktct
            extmsgs['totsize'] = totsize
            extmsgs['type'] = TP_DIRECT
            extmsgs['adminmsgs'].append((arbtup, data))
            # RESPOND! (CTS, only if the transfer is addressed to one of our IDs)
            if da in j1939._config['myIDs']:
                response = struct.pack('<BBBHBBB', CM_CTS, pktct, 1, 0, pgn2, pgn1, pgn0)
                j1939.J1939xmit(0xec, sa, da, response, prio)
        # CTS: peer cleared us to send -- currently only recorded
        def tp_cm_11(arbtup, data, j1939): # CTS
            (prio, edp, dp, pf, da, sa) = arbtup
            (cb, maxpkts, nextpkt, reserved,
            pgn2, pgn1, pgn0) = struct.unpack('<BBBHBBB', data)
            # store extended message information for other stuff...
            extmsgs = j1939.getTPmsgParts(da, sa)
            if extmsgs is None:
                return
            extmsgs['adminmsgs'].append((arbtup, data))
            # SOMEHOW WE TRIGGER THE CONTINUATION OF TRANSMISSION
        # EOM ack: peer confirmed receipt of a transfer we sent
        def tp_cm_13(arbtup, data, j1939): # EOM
            (prio, edp, dp, pf, da, sa) = arbtup
            (cb, totsize, pktct, maxct,
            pgn2, pgn1, pgn0) = struct.unpack('<BHBBBBB', data)
            # print(out extended message and clear the buffers.)
            extmsgs = j1939.getTPmsgParts(da, sa)
            if extmsgs is None:
                return
            extmsgs['adminmsgs'].append((arbtup, data))
            j1939.clearTPmsgParts(da, sa)
            # Coolio, they just confirmed receipt, we're done!
            # Probably need to trigger some mechanism telling the originator
        # BAM: broadcast announce -- like RTS but unacknowledged/broadcast
        def tp_cm_20(arbtup, data, j1939): # BROADCAST MESSAGE (BAM)
            (prio, edp, dp, pf, da, sa) = arbtup
            (cb, totsize, pktct, reserved,
            pgn2, pgn1, pgn0) = struct.unpack('<BHBBBBB', data)
            # check for old stuff
            extmsgs = j1939.getTPmsgParts(da, sa)
            if extmsgs is not None and len(extmsgs['msgs']):
                pgn2 = extmsgs['pgn2']
                pgn1 = extmsgs['pgn1']
                pgn0 = extmsgs['pgn0']
                j1939.saveTPmsg(da, sa, (pgn2, pgn1, pgn0), meldExtMsgs(extmsgs), TP_DIRECT_BROKEN)
                j1939.clearTPmsgParts(da, sa)
            # store extended message information for other stuff...
            extmsgs = j1939.getTPmsgParts(da, sa, create=True)
            extmsgs['ts'] = ts
            extmsgs['sa'] = sa
            extmsgs['da'] = da
            extmsgs['pgn2'] = pgn2
            extmsgs['pgn1'] = pgn1
            extmsgs['pgn0'] = pgn0
            extmsgs['maxct'] = 0
            extmsgs['length'] = pktct
            extmsgs['totsize'] = totsize
            extmsgs['type'] = TP_BAM
            extmsgs['adminmsgs'].append((arbtup, data))
        # call the right TP_CM handler
        tp_cm_handlers = {
            CM_RTS: ('RTS', tp_cm_10),
            CM_CTS: ('CTS', tp_cm_11),
            CM_EOM: ('EndOfMsgACK', tp_cm_13),
            CM_BAM: ('BAM-Broadcast', tp_cm_20),
            CM_ABORT: ('Abort', None),
        }
        # NOTE(review): data[0] yields an int for bytes payloads (py3); for a
        # py2 str payload this would be a one-char str and miss the dict keys
        cb = data[0]
        #print("ec: %.2x%.2x %.2x" % (arbtup[3], arbtup[4], cb))
        htup = tp_cm_handlers.get(cb)
        if htup is not None:
            subname, cb_handler = htup
            if cb_handler is not None:
                cb_handler(arbtup, data, j1939)
def eb_handler(j1939, arbtup, data, ts):
'''
special handler for TP_DT messages
'''
(prio, edp, dp, pf, da, sa) = arbtup
if len(data) < 1:
j1939.log('pf=0xeb: TP ERROR: NO DATA!')
return
extmsgs = j1939.getTPmsgParts(da, sa)
if extmsgs is None:
j1939.log("TP_DT: haven't received TP_CM control setup, skipping")
return
extmsgs['msgs'].append((arbtup, data))
if len(extmsgs['msgs']) >= extmsgs['length']:
# we're done building this message, submit it!
#print("eb_handler: saving: %r %r" % (len(extmsgs['msgs']) , extmsgs['length']))
pgn2 = extmsgs['pgn2']
pgn1 = extmsgs['pgn1']
pgn0 = extmsgs['pgn0']
mtype = extmsgs['type']
j1939.saveTPmsg(da, sa, (pgn2, pgn1, pgn0), meldExtMsgs(extmsgs), mtype)
j1939.clearTPmsgParts(da, sa)
# if this is the end of a message to *me*, reply accordingly
if da in j1939._config['myIDs']:
if mtype is None:
j1939.log("TP_DT_handler: missed beginning of message, not sending EOM: %r" % \
repr(extmsgs), 1)
return
j1939.log("tp_stack: sending EOM extmsgs: %r" % extmsgs, 1)
pgn2 = extmsgs['pgn2']
pgn1 = extmsgs['pgn1']
pgn0 = extmsgs['pgn0']
totsize = extmsgs['totsize']
maxct = extmsgs['maxct']
pktct = extmsgs['length']
data = struct.pack(b'<BHBBBBB', CM_EOM, totsize, pktct, maxct, pgn2, pgn1, pgn0)
j1939.J1939xmit(PF_TP_CM, sa, da, data, prio=prio)
# functions to support the J1939TP Stack (real stuff, not just repr)
'''
these functions support TP messaging. Message parts are stored as PF lists within DA dicts within SA dicts.
ie.:
self_TPmsgParts[sa][da][pf]
this allows for clearing of entire parts of the transient stack easily by SA.
The main message stack has a *different* hierarchy based on what's easiest for developing client code to access.
'''
def getTPmsgParts(self, da, sa, create=False):
'''
# functions to support the J1939TP Stack (real stuff, not just repr)
returns a message list for a given source and destination (sa, da)
if no list exists for this pairing, one is created and an empty list is returned
'''
msglists = self._TPmsgParts.get(sa)
if msglists is None:
msglists = {}
self._TPmsgParts[sa] = msglists
mlist = msglists.get(da)
if mlist is None and create:
# create something new
mlist = {'length':0,
'msgs':[],
'type':None,
'adminmsgs':[],
'pgn0':None,
'pgn1':None,
'pgn2':None,
'totsize':0,
'maxct':0xff,
'sa' : sa,
'da' : da,
}
msglists[da] = mlist
return mlist
def clearTPmsgParts(self, da, sa):
'''
# functions to support the J1939TP Stack (real stuff, not just repr)
clear out extended messages metadata.
if da is None, this clears *all* message data for a given source address
returns whether the thing deleted exists previously
* if da is None, returns whether the sa had anything previously
* otherwise, if the list
'''
exists = False
if da is None:
msglists = self._TPmsgParts.get(sa)
exists = bool(msglists is not None and len(msglists))
self._TPmsgParts[sa] = {}
return exists
msglists = self._TPmsgParts.get(sa)
if msglists is None:
msglists = {}
self._TPmsgParts[sa] = msglists
if da in msglists:
msglists.pop(da)
return True
return False
    def saveTPmsg(self, da, sa, pgn, msg, tptype):
        '''
        # functions to support the J1939TP Stack (real stuff, not just repr)
        store a (reassembled) TP message: rebuilds an arbtup from the packed
        pgn bytes and submits it through the normal message path.
        '''
        pgn2, pgn1, pgn0 = pgn
        ps = pgn2
        pf = pgn1
        # pgn0 packs prio/edp/dp as (prio<<2 | edp<<1 | dp)
        prio = pgn0 >> 2
        edp = (pgn0 >> 1) & 1
        dp = pgn0 & 1
        if da != ps and self.verbose:
            print("saveTPmsg: WARNING: da: 0x%x but ps: 0x%x. using ps" % (da, ps))
            print(da, sa, pgn, repr(msg))
        arbtup = prio, edp, dp, pf, ps, sa
        # note: tptype is not forwarded; the submit path only sees the arbtup/msg
        self._submitJ1939Message(arbtup, msg)
def _getLocals(self, idx, ts, arbtup, data):
#print("getLocals:",idx, ts, arbtup, data)
prio, edp, dp, pf, ps, sa = arbtup
pgn = (pf << 8) | ps
lcls = {'idx': idx,
'ts': ts,
'data': data,
'priority': prio,
'edp': edp,
'dp': dp,
'pf': pf,
'ps': ps,
'sa': sa,
'pgn': pgn,
'da': ps,
'ge': ps,
}
return lcls
    def genCanMsgs(self, start=0, stop=None, arbids=None, tail=False, maxsecs=None):
        '''
        CAN message generator. takes in start/stop indexes as well as a list
        of desired arbids (list)

        maxsecs limits the number of seconds this generator will go for. it's intended
        for use with tail

        Yields (idx, ts, arbtup, data) with ts rebased to an offset from the
        first captured message.  NOTE(review): the arbids filter is currently
        commented out and has no effect.
        '''
        messages = self.getCanMsgQueue()
        if messages is None and not tail:
            return
        # get the ts of the first received message
        if messages is not None and len(messages):
            startts = messages[0][0]
        else:
            startts = time.time()
        if start is None:
            # NOTE(review): start defaults to the *J1939* message count here,
            # although this generator walks the CAN msg queue -- confirm intent
            start = self.getJ1939MsgCount()
        if stop is None or tail:
            # NOTE(review): len(messages) raises TypeError if messages is still
            # None at this point (tail=True with nothing captured yet)
            stop = len(messages)
        else:
            stop = stop + 1 # This makes the stop index inclusive if specified
        starttime = time.time()
        idx = start
        while tail or idx < stop:
            # obey our time restrictions
            # placed here to ensure checking whether we're receiving messages or not
            if maxsecs is not None and time.time() > maxsecs+starttime:
                return
            # If we start sniffing before we receive any messages,
            # messages will be "None". In this case, each time through
            # this loop, check to see if we have messages, and if so,
            # re-create the messages handle
            if messages is None:
                messages = self.getCanMsgQueue()
            # if we're off the end of the original request, and "tailing"
            if tail and idx >= stop:
                msgqlen = len(messages)
                self.log("stop=%d len=%d" % (stop, msgqlen), 3)
                if stop == msgqlen:
                    self.log("waiting for messages", 3)
                    # wait for trigger event so we're not constantly polling
                    self._msg_events[self._msg_source_idx].wait(1)
                    self._msg_events[self._msg_source_idx].clear()
                    self.log("received 'new messages' event trigger", 3)
                # we've gained some messages since last check...
                stop = len(messages)
                continue # to the big message loop.
            # now actually handle messages
            # here's where we get J1939 specific...
            #print(messages[idx])
            ts, arbtup, data = messages[idx]
            #datatup = self._splitCanMsg(msg)
            # make ts an offset instead of the real time.
            ts -= startts
            #if arbids is not None and arbid not in arbids:
            # # allow filtering of arbids
            # idx += 1
            # continue
            yield((idx, ts, arbtup, data))
            idx += 1
    def J1939recv(self, pf, ps, sa, msgcount=1, timeout=1, start_msg=None, update_last_recv=True):
        '''
        Poll the reassembled-J1939 mailbox for up to *msgcount* messages whose
        PF/PS/SA fields match exactly, starting at index *start_msg* (default:
        the last received index).  Gives up after *timeout* seconds.

        Returns a list of (ts, arbtup, data) tuples.  When anything was found
        and update_last_recv is True, self._last_recv_idx advances past the
        last consumed message.  Busy-waits in 1ms sleeps (see FIXME below).
        '''
        out = []
        if start_msg is None:
            start_msg = self._last_recv_idx
        #for msg in self.filterCanMsgs(start_msg=start_msg, advfilters=advfilters, tail=True, maxsecs=timeout):
        #(idx, ts, arbid, data) = msg
        #out.append(msg)
        #self._last_recv_idx = msg[0]
        # FIXME: add in the wait/set interaction for lower perf impact.
        startts = time.time()
        cur = start_msg
        mque = self._messages[J1939MSGS]
        while time.time() < (startts + timeout):
            if cur >= len(mque):
                # nothing new yet: back off briefly
                time.sleep(.001)
                continue
            ts, arbtup, msg = mque[cur]
            cur += 1
            # we have a message now, does the PGN match?
            mprio, medp, mdp, mpf, mps, msa = arbtup
            if mpf != pf or mps != ps or msa != sa:
                continue
            # it's passed the checks... add it to the queue
            out.append((ts, arbtup, msg))
            if len(out) >= msgcount:
                break
        # if we actually found something, and we wanted to update last recvd...
        if len(out) and update_last_recv:
            self._last_recv_idx = cur
        return out
    def J1939recv_loose(self, pf=(), ps=None, sa=None, msgcount=1, timeout=1, start_msg=None, update_last_recv=True):
        '''
        Like J1939recv(), but with loose matching: each of pf/ps/sa may be
        None (wildcard: match anything), a single value (exact match), or a
        tuple/list (match any member).  Returns up to *msgcount* matching
        (ts, arbtup, data) tuples within *timeout* seconds.
        '''
        out = []
        if start_msg is None:
            start_msg = self._last_recv_idx
        #for msg in self.filterCanMsgs(start_msg=start_msg, advfilters=advfilters, tail=True, maxsecs=timeout):
        #(idx, ts, arbid, data) = msg
        #out.append(msg)
        #self._last_recv_idx = msg[0]
        # FIXME: add in the wait/set interaction for lower perf impact.
        startts = time.time()
        cur = start_msg
        mque = self._messages[J1939MSGS]
        while time.time() < (startts + timeout):
            if cur >= len(mque):
                # nothing new yet: back off briefly
                time.sleep(.001)
                continue
            ts, arbtup, msg = mque[cur]
            cur += 1
            # we have a message now, does the PGN match? (loose matching)
            mprio, medp, mdp, mpf, mps, msa = arbtup
            if pf is not None:
                if type(pf) in (tuple, list):
                    if mpf not in pf:
                        continue
                else:
                    if mpf != pf:
                        continue
            if ps is not None:
                if type(ps) in (tuple, list):
                    if mps not in ps:
                        continue
                else:
                    if mps != ps:
                        continue
            if sa is not None:
                if type(sa) in (tuple, list):
                    if msa not in sa:
                        continue
                else:
                    if msa != sa:
                        continue
            # it's passed the checks... add it to the queue
            out.append((ts, arbtup, msg))
            if len(out) >= msgcount:
                break
        # if we actually found something, and we wanted to update last recvd...
        if len(out) and update_last_recv:
            self._last_recv_idx = cur
        return out
def J1939xmit_recv(self, pf, ps, sa, data, recv_count=1, prio=6, edp=0, dp=0, timeout=1, expected_pf=None):
msgidx = self.getCanMsgCount()
# FIXME: filter on the expected response PGN
if expected_pf is None:
expected_pf = pf
res = self.J1939xmit(pf, ps, sa, data, prio, edp, dp)
res = self.J1939recv(expected_pf, sa, ps, recv_count, timeout, start_msg=msgidx)
return res
def J1939_Request(self, rpf, rda_ge=0, redp=0, rdp=0, da=0xff, sa=0xfe, prio=0x6, recv_count=255, timeout=2, expected_pf=None):
pgnbytes = [rda_ge, rpf, redp<<1 | rdp]
data = ''.join([chr(x) for x in pgnbytes])
data += '\xff' * (8-len(data))
if expected_pf is None:
expected_pf = rpf
self.J1939xmit(PF_RQST, da, sa, data)
msgs = self.J1939recv_loose(pf=expected_pf, msgcount=10, timeout=timeout)
return msgs
    def J1939_ClaimAddress(self, addr, name=0x4040404040404040, prio=6):
        '''
        Claim J1939 source address *addr*: transmit an Address Claim
        (PF_ADDRCLAIM) to the global address with the given 64-bit NAME,
        collect any responses (PF 0xee), and start treating *addr* as ours.
        Returns the collected response messages.

        NOTE(review): prio is passed as prio<<2 here, while every other xmit
        path hands prio to emitArbid unshifted -- confirm this is intended.
        '''
        data = struct.pack(">Q", name)
        out = self.J1939xmit_recv(pf=PF_ADDRCLAIM, ps=0xff, sa=addr, data=data, recv_count=10, prio=prio<<2, timeout=2, expected_pf=0xee)
        self.addID(addr)
        return out
    def J1939_ArpAddresses(self):
        '''
        Sends a request for all used addresses... not fully tested

        Broadcasts a Request for the Address Claim PGN and prints a rendered
        line for every response received within the window.
        '''
        #idx = self.getCanMsgCount()
        msgs = self.J1939_Request(PF_ADDRCLAIM, recv_count=255, timeout=3)
        '''
        # FIXME: these are way too loose, for discovery only. tighten down.
        recv_filters = [
        'pf < 0xf0',
        #'pf == 0xee',
        ]
        msgs = self.J1939recv(msgcount=200, timeout=3, advfilters=recv_filters, start_msg=idx)
        '''
        # pretty-print each response; per-message errors are reported, not fatal
        for msg in msgs:
            try:
                msgrepr = self._reprCanMsg(*msg)
                if msgrepr != cancat.DONT_PRINT_THIS_MESSAGE:
                    print(msgrepr)
            except Exception as e:
                print(e)
        '''
        example (from start of ECU):
        00000000 1545142410.990 pri/edp/dp: 6/0/0, PG: ea ff Source: fe Len: 03, Data: 00ee00 Request
        00000001 1545142411.077 pri/edp/dp: 6/0/0, PG: ee ff Source: 00 Len: 08, Data: 4cca4d0100000000 Address Claim: id: 0xdca4c mfg: Cummins Inc (formerly Cummins Engine Co) Columbus, IN USA
        currently ours:
        00001903 1545142785.127 pri/edp/dp: 6/0/0, PG: ea ff Source: fe Len: 03, Data: 00ee00 Request
        '''
MAX_WORD = 64  # widest supported SPN field, in bytes
# bu_masks[n] is an n-bit all-ones mask; used to carve SPN bitfields below
bu_masks = [(2 ** (i)) - 1 for i in range(8*MAX_WORD+1)]
def parsePGNData(pf, ps, msg):
    '''
    Decode a J1939 payload *msg* via the J1939PGNdb/J1939SPNdb databases.

    Returns {'pgn': pgn, 'pgndata': <PGN db entry>, 'spns': [...]} where each
    'spns' element is (spnum, spndict, units, value, repr-string).
    '''
    # piece the correct PGN together from PF/PS
    # NOTE(review): the usual PDU1/PDU2 boundary is PF 0xF0, not 0xEC -- confirm
    if pf < 0xec:
        pgn = pf << 8
    else:
        pgn = (pf << 8) | ps
    # grab the PGN data
    res = J1939PGNdb.get(pgn)
    out = {'pgn': pgn, 'pgndata': res}
    spnlist = res.get('SPNs')
    spndata = []
    for spnum in spnlist:
        # get SPN data
        spn = J1939SPNdb.get(spnum)
        if spn is None:
            continue
        # graciously refactored code from TruckDevil (hey LBD!)
        pgnlen = spn.get('PGNLength')
        spnName = spn.get('Name')
        spnRepr = ''
        datanum = -1
        units = spn.get("Units")
        # skip variable-length PGNs for now
        if (type(pgnlen) == str and 'ariable' in pgnlen):
            datablob = msg
            endBit = endBitO = startByte = startBitO = startBit = 0
        else: # FIXME: Rework this section
            startBit = spn.get('StartBit')
            endBit = spn.get('EndBit')
            startByte = startBit // 8
            startBitO = startBit % 8
            endByte = (endBit + 7) // 8
            endBitO = endBit % 8
            datablob = msg[startByte:endByte]
        if units == 'ASCII':
            spnRepr = repr(datablob)
            datanum = datablob
        else:
            try:
                # carve out the number: little-endian accumulate, then shift
                # and mask down to the SPN's bit span
                datanum = 0
                numbytes = struct.unpack('%dB' % len(datablob), datablob)
                for i, n in enumerate(numbytes):
                    datanum |= (n << (8*i))
                datanum >>= (7 - endBitO)
                mask = bu_masks[endBit - startBit + 1]
                datanum &= mask
                offset = spn.get('Offset')
                if offset is not None:
                    datanum += int(offset)
                # make sense of the number based on units
                if units == 'bit':
                    meaning = ''
                    bitdecode = J1939BitDecodings.get(spnum)
                    if bitdecode is not None:
                        meaning = bitdecode.get(datanum)
                    spnRepr = '0x%x (%s)' % (datanum, meaning)
                elif units == 'binary':
                    spnRepr = "%s (%x)" % (bin(int(datanum)), datanum)
                elif units == '%':
                    spnRepr = "%d%%" % datanum
                else:
                    # some other unit with a resolution
                    resolution = spn.get('Resolution')
                    if resolution is not None:
                        datanum *= resolution
                    # bugfix: this previously printed a 'datafloat' accumulator
                    # that only ever held the raw offset (and could TypeError
                    # on string offsets); show the scaled value instead
                    spnRepr = '%.3f %s' % (datanum, units)
            except Exception as e:
                spnRepr = "ERROR"
                print("SPN: %r %r (%r)" % (e, msg, spn))
                traceback.print_exc()
        spndata.append((spnum, spn, units, datanum, spnRepr))
    out['spns'] = spndata
    return out
| |
# -*- coding: utf-8
"""The connections.py module allows for the creation of an app
which will contain required information for the API. It also
allows for the storage of login information for authentication.
"""
from __future__ import absolute_import, print_function
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from oaxmlapi.base import _Base
from oaxmlapi.datatypes import Datatype
class Application(object):
    """
    Use the Application command to collect application information.

    Arguments:
        client (str): a client string
        client_version (str): a client_version string
        namespace (str): a namespace string
        key (str): a key string
    """

    def __init__(self, client, client_version, namespace, key):
        self.client = client
        self.client_version = client_version
        self.namespace = namespace
        self.key = key

    def __str__(self):
        return "<Application client=%s version=%s>" % (self.client, self.client_version)
class Auth(_Base):
    """
    Use the Auth command to collect authentication information.

    Arguments:
        company (str): a company string
        username (str): a username string
        password (str): a password string
    """

    def __init__(self, company, username, password):
        _Base.__init__(self)
        self.company = company
        self.username = username
        self.password = password

    def __str__(self):
        return "<Auth company=%s username=%s>" % (self.company, self.username)

    def auth(self):
        """
        Returns an ElementTree object containing an XML Auth tag.
        """
        root = ET.Element('Auth')
        login = ET.SubElement(root, 'Login')
        for tag, text in (('company', self.company),
                          ('user', self.username),
                          ('password', self.password)):
            node = ET.SubElement(login, tag)
            node.text = text
        return root

    def _main(self):
        return self.auth()
class RemoteAuth(_Base):
    """
    Use the RemoteAuth command to log in to an individual user account.

    Arguments:
        company (str): a company string
        username (str): a username string
        password (str): a password string
    """

    def __init__(self, company, username, password):
        _Base.__init__(self)
        self.company = company
        self.username = username
        self.password = password

    def __str__(self):
        return "<RemoteAuth company=%s username=%s>" % (self.company, self.username)

    def remoteauth(self):
        """
        Returns an ElementTree object containing an XML RemoteAuth tag.
        """
        root = ET.Element('RemoteAuth')
        login = ET.SubElement(root, 'Login')
        for tag, text in (('company', self.company),
                          ('user', self.username),
                          ('password', self.password)):
            node = ET.SubElement(login, tag)
            node.text = text
        return root

    def _main(self):
        return self.remoteauth()
class Whoami(_Base):
    """
    Use the Whoami command to return info about the authenticated user.

    Arguments:
        datatype (obj): a Datatype object of type 'User'

    Raises:
        Exception: if datatype is not a Datatype, or not of type 'User'.
    """

    def __init__(self, datatype):
        _Base.__init__(self)
        self.datatype = datatype

    def __str__(self):
        return "<Whoami>"

    @property
    def datatype(self):
        return self._datatype

    @datatype.setter
    def datatype(self, d):
        if not isinstance(d, Datatype):
            raise Exception('you must pass a Datatype object')
        elif not d.type == 'User':
            # bugfix: .format() was previously called on the Exception instance
            # (not the message string), which raised AttributeError instead of
            # the intended error message
            raise Exception('you must pass a User Datatype not "{type}"'.format(
                type=d.type
            ))
        self._datatype = d

    def whoami(self):
        """
        Returns an ElementTree object containing an XML Whoami tag.
        """
        whoami = ET.Element('Whoami')
        whoami.append(self.datatype.getDatatype())
        return whoami

    def _main(self):
        return self.whoami()
class Request(_Base):
    """
    Use the Request command to create a complete XML request with tags.

    Arguments:
        application (obj): an Application object
        auth (obj): an Auth or RemoteAuth object
        xml_data (list): a list of XML elements to embed in the request
    """

    def __init__(self, application, auth, xml_data):
        _Base.__init__(self)
        self.application = application
        self.auth = auth
        self.xml_data = xml_data
        self._header = True

    def __str__(self):
        return '<Request client=%s company=%s username=%s>' % (
            self.application.client, self.auth.company, self.auth.username)

    def request(self):
        """
        Returns an ElementTree object containing an XML request tag
        and associated XML data.
        """
        app = self.application
        root = ET.Element('request')
        root.attrib = {
            'API_ver': '1.0',
            'client': app.client,
            'client_ver': app.client_version,
            'namespace': app.namespace,
            'key': app.key
        }
        if isinstance(self.auth, Auth):
            root.append(self.auth.auth())
        elif isinstance(self.auth, RemoteAuth):
            root.append(self.auth.remoteauth())
        else:
            raise Exception('you must pass an Auth or RemoteAuth instance')
        if self.xml_data:
            for elem in self.xml_data:
                root.append(elem)
        return root

    def _main(self):
        return self.request()
class Error(_Base):
    """
    Use the Error command to return info about an error code.

    Fetching errors does not require authentication, so this is
    a shortcut without having to create a Datatype and Read
    command separately.

    Arguments:
        application (obj): an Application object
        code (str): an error code string
    """

    def __init__(self, application, code):
        _Base.__init__(self)
        self.application = application
        self.code = str(code)
        self._header = True

    def __str__(self):
        return "<Error code=%s>" % self.code

    def error(self):
        """
        Returns an ElementTree object containing XML error tags.
        """
        app = self.application
        root = ET.Element('request')
        root.attrib = {
            'API_ver': '1.0',
            'client': app.client,
            'client_ver': app.client_version,
            'namespace': app.namespace,
            'key': app.key
        }
        read = ET.SubElement(root, 'Read')
        read.attrib = {'type': 'Error', 'method': 'equal to'}
        err = ET.SubElement(read, 'Error')
        code = ET.SubElement(err, 'code')
        code.text = self.code
        return root

    def _main(self):
        return self.error()
| |
# Python stubs generated by omniidl from /usr/local/share/idl/omniORB/compression.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
# Generated compatibility shim: very old Pythons lacked the property()
# builtin; fall back to a no-op so the generated descriptors still evaluate.
try:
    property
except NameError:
    def property(*args):
        return None
# #include "corbaidl.idl"
import corbaidl_idl
_0_CORBA = omniORB.openModule("omniORB.CORBA")
_0_CORBA__POA = omniORB.openModule("omniORB.CORBA__POA")
#
# Start of module "Compression"
#
__name__ = "omniORB.Compression"
_0_Compression = omniORB.openModule("omniORB.Compression", r"/usr/local/share/idl/omniORB/compression.idl")
_0_Compression__POA = omniORB.openModule("omniORB.Compression__POA", r"/usr/local/share/idl/omniORB/compression.idl")
# --- Generated CORBA user-exception types.  Each class is registered with
# the ORB's typecode machinery and then deleted from this module's scope;
# access them via _0_Compression.<Name>.  (omniidl output -- do not edit.)
# exception CompressionException
_0_Compression.CompressionException = omniORB.newEmptyClass()
class CompressionException (CORBA.UserException):
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressionException:1.0"
    def __init__(self, reason, description):
        CORBA.UserException.__init__(self, reason, description)
        self.reason = reason
        self.description = description
_0_Compression.CompressionException = CompressionException
_0_Compression._d_CompressionException = (omniORB.tcInternal.tv_except, CompressionException, CompressionException._NP_RepositoryId, "CompressionException", "reason", omniORB.tcInternal.tv_long, "description", (omniORB.tcInternal.tv_string,0))
_0_Compression._tc_CompressionException = omniORB.tcInternal.createTypeCode(_0_Compression._d_CompressionException)
omniORB.registerType(CompressionException._NP_RepositoryId, _0_Compression._d_CompressionException, _0_Compression._tc_CompressionException)
del CompressionException
# exception FactoryAlreadyRegistered
_0_Compression.FactoryAlreadyRegistered = omniORB.newEmptyClass()
class FactoryAlreadyRegistered (CORBA.UserException):
    _NP_RepositoryId = "IDL:omg.org/Compression/FactoryAlreadyRegistered:1.0"
    def __init__(self):
        CORBA.UserException.__init__(self)
_0_Compression.FactoryAlreadyRegistered = FactoryAlreadyRegistered
_0_Compression._d_FactoryAlreadyRegistered = (omniORB.tcInternal.tv_except, FactoryAlreadyRegistered, FactoryAlreadyRegistered._NP_RepositoryId, "FactoryAlreadyRegistered")
_0_Compression._tc_FactoryAlreadyRegistered = omniORB.tcInternal.createTypeCode(_0_Compression._d_FactoryAlreadyRegistered)
omniORB.registerType(FactoryAlreadyRegistered._NP_RepositoryId, _0_Compression._d_FactoryAlreadyRegistered, _0_Compression._tc_FactoryAlreadyRegistered)
del FactoryAlreadyRegistered
# exception UnknownCompressorId
_0_Compression.UnknownCompressorId = omniORB.newEmptyClass()
class UnknownCompressorId (CORBA.UserException):
    _NP_RepositoryId = "IDL:omg.org/Compression/UnknownCompressorId:1.0"
    def __init__(self):
        CORBA.UserException.__init__(self)
_0_Compression.UnknownCompressorId = UnknownCompressorId
_0_Compression._d_UnknownCompressorId = (omniORB.tcInternal.tv_except, UnknownCompressorId, UnknownCompressorId._NP_RepositoryId, "UnknownCompressorId")
_0_Compression._tc_UnknownCompressorId = omniORB.tcInternal.createTypeCode(_0_Compression._d_UnknownCompressorId)
omniORB.registerType(UnknownCompressorId._NP_RepositoryId, _0_Compression._d_UnknownCompressorId, _0_Compression._tc_UnknownCompressorId)
del UnknownCompressorId
# --- Generated IDL typedef aliases.  The classes exist only to carry the
# repository ID; values of these "types" are plain Python numbers, which is
# why direct construction raises.  (omniidl output -- do not edit.)
# typedef ... CompressorId
class CompressorId:
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressorId:1.0"
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressorId = CompressorId
_0_Compression._d_CompressorId = omniORB.tcInternal.tv_ushort
_0_Compression._ad_CompressorId = (omniORB.tcInternal.tv_alias, CompressorId._NP_RepositoryId, "CompressorId", omniORB.tcInternal.tv_ushort)
_0_Compression._tc_CompressorId = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressorId)
omniORB.registerType(CompressorId._NP_RepositoryId, _0_Compression._ad_CompressorId, _0_Compression._tc_CompressorId)
del CompressorId
# Well-known compressor ids from the OMG Compression IDL.
_0_Compression.COMPRESSORID_NONE = 0
_0_Compression.COMPRESSORID_GZIP = 1
_0_Compression.COMPRESSORID_PKZIP = 2
_0_Compression.COMPRESSORID_BZIP2 = 3
_0_Compression.COMPRESSORID_ZLIB = 4
_0_Compression.COMPRESSORID_LZMA = 5
_0_Compression.COMPRESSORID_LZO = 6
_0_Compression.COMPRESSORID_RZIP = 7
_0_Compression.COMPRESSORID_7X = 8
_0_Compression.COMPRESSORID_XAR = 9
# typedef ... CompressionLevel
class CompressionLevel:
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressionLevel:1.0"
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressionLevel = CompressionLevel
_0_Compression._d_CompressionLevel = omniORB.tcInternal.tv_ushort
_0_Compression._ad_CompressionLevel = (omniORB.tcInternal.tv_alias, CompressionLevel._NP_RepositoryId, "CompressionLevel", omniORB.tcInternal.tv_ushort)
_0_Compression._tc_CompressionLevel = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressionLevel)
omniORB.registerType(CompressionLevel._NP_RepositoryId, _0_Compression._ad_CompressionLevel, _0_Compression._tc_CompressionLevel)
del CompressionLevel
# typedef ... CompressionRatio
class CompressionRatio:
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressionRatio:1.0"
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressionRatio = CompressionRatio
_0_Compression._d_CompressionRatio = omniORB.tcInternal.tv_float
_0_Compression._ad_CompressionRatio = (omniORB.tcInternal.tv_alias, CompressionRatio._NP_RepositoryId, "CompressionRatio", omniORB.tcInternal.tv_float)
_0_Compression._tc_CompressionRatio = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressionRatio)
omniORB.registerType(CompressionRatio._NP_RepositoryId, _0_Compression._ad_CompressionRatio, _0_Compression._tc_CompressionRatio)
del CompressionRatio
# struct CompressorIdLevel
_0_Compression.CompressorIdLevel = omniORB.newEmptyClass()
class CompressorIdLevel (omniORB.StructBase):
_NP_RepositoryId = "IDL:omg.org/Compression/CompressorIdLevel:1.0"
def __init__(self, compressor_id, compression_level):
self.compressor_id = compressor_id
self.compression_level = compression_level
_0_Compression.CompressorIdLevel = CompressorIdLevel
_0_Compression._d_CompressorIdLevel = (omniORB.tcInternal.tv_struct, CompressorIdLevel, CompressorIdLevel._NP_RepositoryId, "CompressorIdLevel", "compressor_id", omniORB.typeMapping["IDL:omg.org/Compression/CompressorId:1.0"], "compression_level", omniORB.typeMapping["IDL:omg.org/Compression/CompressionLevel:1.0"])
_0_Compression._tc_CompressorIdLevel = omniORB.tcInternal.createTypeCode(_0_Compression._d_CompressorIdLevel)
omniORB.registerType(CompressorIdLevel._NP_RepositoryId, _0_Compression._d_CompressorIdLevel, _0_Compression._tc_CompressorIdLevel)
del CompressorIdLevel
# typedef ... CompressorIdLevelList
class CompressorIdLevelList:
_NP_RepositoryId = "IDL:omg.org/Compression/CompressorIdLevelList:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressorIdLevelList = CompressorIdLevelList
_0_Compression._d_CompressorIdLevelList = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/CompressorIdLevel:1.0"], 0)
_0_Compression._ad_CompressorIdLevelList = (omniORB.tcInternal.tv_alias, CompressorIdLevelList._NP_RepositoryId, "CompressorIdLevelList", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/CompressorIdLevel:1.0"], 0))
_0_Compression._tc_CompressorIdLevelList = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressorIdLevelList)
omniORB.registerType(CompressorIdLevelList._NP_RepositoryId, _0_Compression._ad_CompressorIdLevelList, _0_Compression._tc_CompressorIdLevelList)
del CompressorIdLevelList
# typedef ... Buffer
class Buffer:
_NP_RepositoryId = "IDL:omg.org/Compression/Buffer:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.Buffer = Buffer
_0_Compression._d_Buffer = omniORB.typeMapping["IDL:omg.org/CORBA/OctetSeq:1.0"]
_0_Compression._ad_Buffer = (omniORB.tcInternal.tv_alias, Buffer._NP_RepositoryId, "Buffer", omniORB.typeCodeMapping["IDL:omg.org/CORBA/OctetSeq:1.0"]._d)
_0_Compression._tc_Buffer = omniORB.tcInternal.createTypeCode(_0_Compression._ad_Buffer)
omniORB.registerType(Buffer._NP_RepositoryId, _0_Compression._ad_Buffer, _0_Compression._tc_Buffer)
del Buffer
# forward interface CompressorFactory;
_0_Compression._d_CompressorFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/Compression/CompressorFactory:1.0", "CompressorFactory")
omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"] = _0_Compression._d_CompressorFactory
# interface Compressor
_0_Compression._d_Compressor = (omniORB.tcInternal.tv_objref, "IDL:omg.org/Compression/Compressor:1.0", "Compressor")
omniORB.typeMapping["IDL:omg.org/Compression/Compressor:1.0"] = _0_Compression._d_Compressor
_0_Compression.Compressor = omniORB.newEmptyClass()
# Abstract marker class for the Compressor interface; never instantiated
# directly -- client code works with _objref_Compressor references.
class Compressor :
    _NP_RepositoryId = _0_Compression._d_Compressor[1]
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
    _nil = CORBA.Object._nil
_0_Compression.Compressor = Compressor
_0_Compression._tc_Compressor = omniORB.tcInternal.createTypeCode(_0_Compression._d_Compressor)
omniORB.registerType(Compressor._NP_RepositoryId, _0_Compression._d_Compressor, _0_Compression._tc_Compressor)
# Compressor operations and attributes
# Descriptors: (in-arg types, out/return types, {exception repo-id: descriptor}).
Compressor._d_compress = ((omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"], omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"]), (omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"], ), {_0_Compression.CompressionException._NP_RepositoryId: _0_Compression._d_CompressionException})
Compressor._d_decompress = ((omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"], omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"]), (omniORB.typeMapping["IDL:omg.org/Compression/Buffer:1.0"], ), {_0_Compression.CompressionException._NP_RepositoryId: _0_Compression._d_CompressionException})
Compressor._d__get_compressor_factory = ((),(omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"],),None)
Compressor._d__get_compression_level = ((),(omniORB.typeMapping["IDL:omg.org/Compression/CompressionLevel:1.0"],),None)
Compressor._d__get_compressed_bytes = ((),(omniORB.tcInternal.tv_ulonglong,),None)
Compressor._d__get_uncompressed_bytes = ((),(omniORB.tcInternal.tv_ulonglong,),None)
Compressor._d__get_compression_ratio = ((),(omniORB.typeMapping["IDL:omg.org/Compression/CompressionRatio:1.0"],),None)
# Compressor object reference
# Client-side proxy: each method forwards to the remote object via invoke().
class _objref_Compressor (CORBA.Object):
    _NP_RepositoryId = Compressor._NP_RepositoryId
    def __init__(self, obj):
        CORBA.Object.__init__(self, obj)
    def compress(self, *args):
        return self._obj.invoke("compress", _0_Compression.Compressor._d_compress, args)
    def decompress(self, *args):
        return self._obj.invoke("decompress", _0_Compression.Compressor._d_decompress, args)
    def _get_compressor_factory(self, *args):
        return self._obj.invoke("_get_compressor_factory", _0_Compression.Compressor._d__get_compressor_factory, args)
    compressor_factory = property(_get_compressor_factory)
    def _get_compression_level(self, *args):
        return self._obj.invoke("_get_compression_level", _0_Compression.Compressor._d__get_compression_level, args)
    compression_level = property(_get_compression_level)
    def _get_compressed_bytes(self, *args):
        return self._obj.invoke("_get_compressed_bytes", _0_Compression.Compressor._d__get_compressed_bytes, args)
    compressed_bytes = property(_get_compressed_bytes)
    def _get_uncompressed_bytes(self, *args):
        return self._obj.invoke("_get_uncompressed_bytes", _0_Compression.Compressor._d__get_uncompressed_bytes, args)
    uncompressed_bytes = property(_get_uncompressed_bytes)
    def _get_compression_ratio(self, *args):
        return self._obj.invoke("_get_compression_ratio", _0_Compression.Compressor._d__get_compression_ratio, args)
    compression_ratio = property(_get_compression_ratio)
omniORB.registerObjref(Compressor._NP_RepositoryId, _objref_Compressor)
_0_Compression._objref_Compressor = _objref_Compressor
del Compressor, _objref_Compressor
# Compressor skeleton
# __name__ is temporarily switched so the servant class below is attributed
# to the POA module (Python picks __module__ up from the current __name__).
__name__ = "omniORB.Compression__POA"
class Compressor (PortableServer.Servant):
    _NP_RepositoryId = _0_Compression.Compressor._NP_RepositoryId
    _omni_op_d = {"compress": _0_Compression.Compressor._d_compress, "decompress": _0_Compression.Compressor._d_decompress, "_get_compressor_factory": _0_Compression.Compressor._d__get_compressor_factory, "_get_compression_level": _0_Compression.Compressor._d__get_compression_level, "_get_compressed_bytes": _0_Compression.Compressor._d__get_compressed_bytes, "_get_uncompressed_bytes": _0_Compression.Compressor._d__get_uncompressed_bytes, "_get_compression_ratio": _0_Compression.Compressor._d__get_compression_ratio}
Compressor._omni_skeleton = Compressor
_0_Compression__POA.Compressor = Compressor
omniORB.registerSkeleton(Compressor._NP_RepositoryId, Compressor)
del Compressor
__name__ = "omniORB.Compression"
# typedef ... CompressorSeq
# IDL alias: unbounded sequence of Compressor object references.
class CompressorSeq:
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressorSeq:1.0"
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressorSeq = CompressorSeq
_0_Compression._d_CompressorSeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/Compressor:1.0"], 0)
_0_Compression._ad_CompressorSeq = (omniORB.tcInternal.tv_alias, CompressorSeq._NP_RepositoryId, "CompressorSeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/Compressor:1.0"], 0))
_0_Compression._tc_CompressorSeq = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressorSeq)
omniORB.registerType(CompressorSeq._NP_RepositoryId, _0_Compression._ad_CompressorSeq, _0_Compression._tc_CompressorSeq)
del CompressorSeq
# interface CompressorFactory
_0_Compression._d_CompressorFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/Compression/CompressorFactory:1.0", "CompressorFactory")
omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"] = _0_Compression._d_CompressorFactory
_0_Compression.CompressorFactory = omniORB.newEmptyClass()
# Abstract marker class for the CompressorFactory interface.
class CompressorFactory :
    _NP_RepositoryId = _0_Compression._d_CompressorFactory[1]
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
    _nil = CORBA.Object._nil
_0_Compression.CompressorFactory = CompressorFactory
_0_Compression._tc_CompressorFactory = omniORB.tcInternal.createTypeCode(_0_Compression._d_CompressorFactory)
omniORB.registerType(CompressorFactory._NP_RepositoryId, _0_Compression._d_CompressorFactory, _0_Compression._tc_CompressorFactory)
# CompressorFactory operations and attributes
CompressorFactory._d__get_compressor_id = ((),(omniORB.typeMapping["IDL:omg.org/Compression/CompressorId:1.0"],),None)
CompressorFactory._d_get_compressor = ((omniORB.typeMapping["IDL:omg.org/Compression/CompressionLevel:1.0"], ), (omniORB.typeMapping["IDL:omg.org/Compression/Compressor:1.0"], ), None)
# CompressorFactory object reference
# Client-side proxy forwarding calls to the remote object.
class _objref_CompressorFactory (CORBA.Object):
    _NP_RepositoryId = CompressorFactory._NP_RepositoryId
    def __init__(self, obj):
        CORBA.Object.__init__(self, obj)
    def _get_compressor_id(self, *args):
        return self._obj.invoke("_get_compressor_id", _0_Compression.CompressorFactory._d__get_compressor_id, args)
    compressor_id = property(_get_compressor_id)
    def get_compressor(self, *args):
        return self._obj.invoke("get_compressor", _0_Compression.CompressorFactory._d_get_compressor, args)
omniORB.registerObjref(CompressorFactory._NP_RepositoryId, _objref_CompressorFactory)
_0_Compression._objref_CompressorFactory = _objref_CompressorFactory
del CompressorFactory, _objref_CompressorFactory
# CompressorFactory skeleton
# Temporary __name__ switch attributes the servant class to the POA module.
__name__ = "omniORB.Compression__POA"
class CompressorFactory (PortableServer.Servant):
    _NP_RepositoryId = _0_Compression.CompressorFactory._NP_RepositoryId
    _omni_op_d = {"_get_compressor_id": _0_Compression.CompressorFactory._d__get_compressor_id, "get_compressor": _0_Compression.CompressorFactory._d_get_compressor}
CompressorFactory._omni_skeleton = CompressorFactory
_0_Compression__POA.CompressorFactory = CompressorFactory
omniORB.registerSkeleton(CompressorFactory._NP_RepositoryId, CompressorFactory)
del CompressorFactory
__name__ = "omniORB.Compression"
# typedef ... CompressorFactorySeq
# IDL alias: unbounded sequence of CompressorFactory object references.
class CompressorFactorySeq:
    _NP_RepositoryId = "IDL:omg.org/Compression/CompressorFactorySeq:1.0"
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
_0_Compression.CompressorFactorySeq = CompressorFactorySeq
_0_Compression._d_CompressorFactorySeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"], 0)
_0_Compression._ad_CompressorFactorySeq = (omniORB.tcInternal.tv_alias, CompressorFactorySeq._NP_RepositoryId, "CompressorFactorySeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"], 0))
_0_Compression._tc_CompressorFactorySeq = omniORB.tcInternal.createTypeCode(_0_Compression._ad_CompressorFactorySeq)
omniORB.registerType(CompressorFactorySeq._NP_RepositoryId, _0_Compression._ad_CompressorFactorySeq, _0_Compression._tc_CompressorFactorySeq)
del CompressorFactorySeq
# interface CompressionManager
_0_Compression._d_CompressionManager = (omniORB.tcInternal.tv_objref, "IDL:omg.org/Compression/CompressionManager:1.0", "CompressionManager")
omniORB.typeMapping["IDL:omg.org/Compression/CompressionManager:1.0"] = _0_Compression._d_CompressionManager
_0_Compression.CompressionManager = omniORB.newEmptyClass()
# Abstract marker class for the CompressionManager interface.
class CompressionManager :
    _NP_RepositoryId = _0_Compression._d_CompressionManager[1]
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
    _nil = CORBA.Object._nil
_0_Compression.CompressionManager = CompressionManager
_0_Compression._tc_CompressionManager = omniORB.tcInternal.createTypeCode(_0_Compression._d_CompressionManager)
omniORB.registerType(CompressionManager._NP_RepositoryId, _0_Compression._d_CompressionManager, _0_Compression._tc_CompressionManager)
# CompressionManager operations and attributes
CompressionManager._d_register_factory = ((omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"], ), (), {_0_Compression.FactoryAlreadyRegistered._NP_RepositoryId: _0_Compression._d_FactoryAlreadyRegistered})
CompressionManager._d_unregister_factory = ((omniORB.typeMapping["IDL:omg.org/Compression/CompressorId:1.0"], ), (), {_0_Compression.UnknownCompressorId._NP_RepositoryId: _0_Compression._d_UnknownCompressorId})
CompressionManager._d_get_factory = ((omniORB.typeMapping["IDL:omg.org/Compression/CompressorId:1.0"], ), (omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactory:1.0"], ), {_0_Compression.UnknownCompressorId._NP_RepositoryId: _0_Compression._d_UnknownCompressorId})
CompressionManager._d_get_compressor = ((omniORB.typeMapping["IDL:omg.org/Compression/CompressorId:1.0"], omniORB.typeMapping["IDL:omg.org/Compression/CompressionLevel:1.0"]), (omniORB.typeMapping["IDL:omg.org/Compression/Compressor:1.0"], ), {_0_Compression.UnknownCompressorId._NP_RepositoryId: _0_Compression._d_UnknownCompressorId})
CompressionManager._d_get_factories = ((), (omniORB.typeMapping["IDL:omg.org/Compression/CompressorFactorySeq:1.0"], ), None)
# CompressionManager object reference
# Client-side proxy forwarding calls to the remote object.
class _objref_CompressionManager (CORBA.Object):
    _NP_RepositoryId = CompressionManager._NP_RepositoryId
    def __init__(self, obj):
        CORBA.Object.__init__(self, obj)
    def register_factory(self, *args):
        return self._obj.invoke("register_factory", _0_Compression.CompressionManager._d_register_factory, args)
    def unregister_factory(self, *args):
        return self._obj.invoke("unregister_factory", _0_Compression.CompressionManager._d_unregister_factory, args)
    def get_factory(self, *args):
        return self._obj.invoke("get_factory", _0_Compression.CompressionManager._d_get_factory, args)
    def get_compressor(self, *args):
        return self._obj.invoke("get_compressor", _0_Compression.CompressionManager._d_get_compressor, args)
    def get_factories(self, *args):
        return self._obj.invoke("get_factories", _0_Compression.CompressionManager._d_get_factories, args)
omniORB.registerObjref(CompressionManager._NP_RepositoryId, _objref_CompressionManager)
_0_Compression._objref_CompressionManager = _objref_CompressionManager
del CompressionManager, _objref_CompressionManager
# CompressionManager skeleton
# Temporary __name__ switch attributes the servant class to the POA module.
__name__ = "omniORB.Compression__POA"
class CompressionManager (PortableServer.Servant):
    _NP_RepositoryId = _0_Compression.CompressionManager._NP_RepositoryId
    _omni_op_d = {"register_factory": _0_Compression.CompressionManager._d_register_factory, "unregister_factory": _0_Compression.CompressionManager._d_unregister_factory, "get_factory": _0_Compression.CompressionManager._d_get_factory, "get_compressor": _0_Compression.CompressionManager._d_get_compressor, "get_factories": _0_Compression.CompressionManager._d_get_factories}
CompressionManager._omni_skeleton = CompressionManager
_0_Compression__POA.CompressionManager = CompressionManager
omniORB.registerSkeleton(CompressionManager._NP_RepositoryId, CompressionManager)
del CompressionManager
__name__ = "omniORB.Compression"
#
# End of module "Compression"
#
__name__ = "compression_idl"
_exported_modules = ( "omniORB.Compression", )
# The end.
| |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import base64
import six
from argus.backends import base as base_backend
from argus.backends import windows
from argus.backends.tempest import manager as api_manager
from argus import util
# NOTE(review): restore_excepthook() presumably shields argus's sys.excepthook
# from tempest's import-time side effects -- confirm against argus.util.
with util.restore_excepthook():
    from tempest.common import waiters
LOG = util.get_logger()
# Starting size as number of lines and tolerance.
OUTPUT_SIZE = 128
# pylint: disable=abstract-method; FP: https://bitbucket.org/logilab/pylint/issues/565
@six.add_metaclass(abc.ABCMeta)
class BaseTempestBackend(base_backend.CloudBackend):
    """Base class for backends built on top of Tempest.

    :param conf:
        The config object used for controlling argus's behaviour.
    :param name:
        The name will be used for creating instances with this
        backend.
    :param userdata:
        The userdata which will be available in the instance
        to the corresponding cloud initialization service.
    :param metadata:
        The metadata which will be available in the instance
        to the corresponding cloud initialization service.
        It will be the content of the *meta* key in OpenStack's
        metadata for instance.
    :param availability_zone:
        The availability zone in which the underlying instance
        will be available.
    """

    def __init__(self, conf, name, userdata, metadata, availability_zone):
        if userdata:
            # base64.encodestring was deprecated in Python 3.1 and removed in
            # 3.9; use its drop-in replacement encodebytes when available and
            # keep encodestring only as a Python 2 fallback.
            try:
                encode = base64.encodebytes
            except AttributeError:      # Python 2
                encode = base64.encodestring
            userdata = encode(userdata)
        super(BaseTempestBackend, self).__init__(conf, name, userdata,
                                                 metadata, availability_zone)
        # Tempest resources created by setup_instance() and torn down again
        # by cleanup().
        self._server = None
        self._keypair = None
        self._security_group = None
        self._security_groups_rules = []
        self._subnets = []
        self._routers = []
        self._floating_ip = None
        self._networks = None    # list with UUIDs for future attached NICs

        # set some members from the configuration file needed by recipes
        self.image_ref = self._conf.openstack.image_ref
        self.flavor_ref = self._conf.openstack.flavor_ref
        self._manager = api_manager.APIManager()

    def _configure_networking(self):
        """Point the primary subnet at the configured DNS nameservers."""
        subnet_id = self._manager.primary_credentials().subnet["id"]
        self._manager.network_client.update_subnet(
            subnet_id,
            dns_nameservers=self._conf.argus.dns_nameservers)

    def _create_server(self, wait_until='ACTIVE', **kwargs):
        """Boot a new server and wait until it reaches *wait_until*.

        Extra keyword arguments are forwarded verbatim to the Tempest
        ``create_server`` call.  Returns the server body dict.
        """
        server = self._manager.servers_client.create_server(
            name=util.rand_name(self._name) + "-instance",
            imageRef=self.image_ref,
            flavorRef=self.flavor_ref,
            **kwargs)
        waiters.wait_for_server_status(
            self._manager.servers_client, server['server']['id'], wait_until)
        return server['server']

    def _assign_floating_ip(self):
        """Allocate a floating IP and attach it to the current instance."""
        floating_ip = self._manager.floating_ips_client.create_floating_ip()
        floating_ip = floating_ip['floating_ip']
        self._manager.floating_ips_client.associate_floating_ip_to_server(
            floating_ip['ip'], self.internal_instance_id())
        return floating_ip

    def _add_security_group_exceptions(self, secgroup_id):
        """Yield the security-group rules opening the ports argus needs."""
        _client = self._manager.security_group_rules_client
        rulesets = [
            {
                # RDP
                'ip_protocol': 'tcp',
                'from_port': 3389,
                'to_port': 3389,
                'cidr': '0.0.0.0/0',
            },
            {
                # http winrm
                'ip_protocol': 'tcp',
                'from_port': 5985,
                'to_port': 5985,
                'cidr': '0.0.0.0/0',
            },
            {
                # https winrm
                'ip_protocol': 'tcp',
                'from_port': 5986,
                'to_port': 5986,
                'cidr': '0.0.0.0/0',
            },
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            },
        ]
        for ruleset in rulesets:
            sg_rule = _client.create_security_group_rule(
                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
            yield sg_rule

    def _create_security_groups(self):
        """Create a security group, open argus's ports and attach the
        group to the current instance."""
        sg_name = util.rand_name(self.__class__.__name__)
        sg_desc = sg_name + " description"
        secgroup = self._manager.security_groups_client.create_security_group(
            name=sg_name, description=sg_desc)['security_group']

        # Add rules to the security group.
        for rule in self._add_security_group_exceptions(secgroup['id']):
            self._security_groups_rules.append(rule['id'])
        self._manager.servers_client.add_security_group(
            self.internal_instance_id(),
            secgroup['name'])
        return secgroup

    def cleanup(self):
        """Cleanup the underlying instance.

        In order for the backend to be useful again,
        call :meth:`setup_instance` method for preparing another
        underlying instance.
        """
        LOG.info("Cleaning up...")

        if self._security_groups_rules:
            for rule in self._security_groups_rules:
                self._manager.security_group_rules_client.delete_security_group_rule(rule)

        if self._security_group:
            self._manager.servers_client.remove_security_group(
                self.internal_instance_id(),
                self._security_group['name'])

        if self._server:
            self._manager.servers_client.delete_server(
                self.internal_instance_id())
            waiters.wait_for_server_termination(
                self._manager.servers_client,
                self.internal_instance_id())

        if self._floating_ip:
            self._manager.floating_ips_client.delete_floating_ip(
                self._floating_ip['id'])

        if self._keypair:
            self._keypair.destroy()

        self._manager.cleanup_credentials()

    def setup_instance(self):
        """Create and prepare the underlying instance (network, keypair,
        server, floating IP and security groups)."""
        # pylint: disable=attribute-defined-outside-init
        LOG.info("Creating server...")

        self._configure_networking()
        self._keypair = self._manager.create_keypair(
            name=self.__class__.__name__)
        self._server = self._create_server(
            wait_until='ACTIVE',
            key_name=self._keypair.name,
            disk_config='AUTO',
            user_data=self.userdata,
            meta=self.metadata,
            networks=self._networks,
            availability_zone=self._availability_zone)
        self._floating_ip = self._assign_floating_ip()
        self._security_group = self._create_security_groups()

    def reboot_instance(self):
        # Delegate to the manager to reboot the instance
        return self._manager.reboot_instance(self.internal_instance_id())

    def instance_password(self):
        # Delegate to the manager to find out the instance password
        return self._manager.instance_password(
            self.internal_instance_id(),
            self._keypair)

    def internal_instance_id(self):
        """Return the Nova id of the underlying server."""
        return self._server["id"]

    def instance_output(self, limit=OUTPUT_SIZE):
        """Get the console output, sent from the instance."""
        return self._manager.instance_output(
            self.internal_instance_id(),
            limit)

    def instance_server(self):
        """Get the instance server object."""
        return self._manager.instance_server(self.internal_instance_id())

    def public_key(self):
        return self._keypair.public_key

    def private_key(self):
        return self._keypair.private_key

    def get_image_by_ref(self):
        """Return the Glance image body for the configured image_ref."""
        image = self._manager.images_client.show_image(self._conf.openstack.image_ref)
        return image['image']

    def floating_ip(self):
        return self._floating_ip['ip']
class BaseWindowsTempestBackend(windows.WindowsBackendMixin,
                                BaseTempestBackend):
    """Base Tempest backend for testing Windows."""

    def _get_log_template(self, suffix):
        # Take the generic template and, when the installer build/arch are
        # configured, prefix them so the log name identifies the installer.
        template = super(BaseWindowsTempestBackend,
                         self)._get_log_template(suffix)
        if self._conf.argus.build and self._conf.argus.arch:
            template = "%s-%s-%s" % (self._conf.argus.build,
                                     self._conf.argus.arch,
                                     template)
        return template
| |
# -*- coding: utf-8 -*-
"""
pyvisa.ctwrapper.highlevel
~~~~~~~~~~~~~~~~~~~~~~~~~~
Highlevel wrapper of the VISA Library.
This file is part of PyVISA.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import logging
import warnings
from pyvisa import constants, errors, highlevel, logger
from pyvisa.compat import integer_types, OrderedDict
from .cthelper import Library, find_library
from . import functions
logger = logging.LoggerAdapter(logger, {'backend': 'ni'})
def add_visa_methods(aclass):
    """Class decorator copying every VISA wrapper from :mod:`functions`
    onto *aclass* as a method, and returning the class."""
    for name in functions.visa_functions:
        setattr(aclass, name, getattr(functions, name))
    return aclass
def _args_to_str(args):
out = []
for arg in args:
try:
# noinspection PyProtectedMember
out.append(str(arg._obj))
except AttributeError:
out.append(arg)
return tuple(out)
@add_visa_methods
class NIVisaLibrary(highlevel.VisaLibraryBase):
    """High level NI-VISA Library wrapper using ctypes.

    The easiest way to instantiate the library is to let `pyvisa` find the
    right one for you. This looks first in your configuration file (~/.pyvisarc).
    If it fails, it uses `ctypes.util.find_library` to try to locate a library
    in a way similar to what the compiler does:

       >>> visa_library = NIVisaLibrary()

    But you can also specify the path:

        >>> visa_library = NIVisaLibrary('/my/path/visa.so')

    :param library_path: path of the VISA library.
    """

    @staticmethod
    def get_library_paths():
        """Return a tuple of possible library paths.

        A user-configured path (from ~/.pyvisarc) is moved to the front of
        the automatically discovered candidates.

        :rtype: tuple
        """
        from ..util import LibraryPath, read_user_library_path

        user_lib = read_user_library_path()
        tmp = [find_library(library_path)
               for library_path in ('visa', 'visa32', 'visa32.dll', 'visa64', 'visa64.dll')]
        tmp = [LibraryPath(library_path)
               for library_path in set(tmp)
               if library_path is not None]

        # Lazy %-formatting: only rendered when DEBUG is enabled.
        logger.debug('Automatically found library files: %s', tmp)

        if user_lib:
            user_lib = LibraryPath(user_lib, 'user')
            try:
                tmp.remove(user_lib)
            except ValueError:
                pass
            tmp.insert(0, user_lib)

        return tuple(tmp)

    @staticmethod
    def get_debug_info():
        """Return an ordered dict of per-library backend info
        (vendor, implementation and spec versions when obtainable).
        """
        from pyvisa import __version__
        d = OrderedDict()
        d['Version'] = '%s (bundled with PyVISA)' % __version__

        paths = NIVisaLibrary.get_library_paths()

        for ndx, visalib in enumerate(paths, 1):
            nfo = OrderedDict()
            nfo['found by'] = visalib.found_by
            nfo['bitness'] = visalib.bitness
            try:
                lib = NIVisaLibrary(visalib)
                sess, _ = lib.open_default_resource_manager()
                nfo['Vendor'] = str(lib.get_attribute(sess, constants.VI_ATTR_RSRC_MANF_NAME)[0])
                nfo['Impl. Version'] = str(lib.get_attribute(sess, constants.VI_ATTR_RSRC_IMPL_VERSION)[0])
                nfo['Spec. Version'] = str(lib.get_attribute(sess, constants.VI_ATTR_RSRC_SPEC_VERSION)[0])
                lib.close(sess)
            except Exception as e:
                e = str(e)
                if 'No matching architecture' in e:
                    # 32/64-bit mismatch between interpreter and library.
                    nfo['Could not get more info'] = 'Interpreter and library have different bitness.'
                else:
                    nfo['Could not get more info'] = str(e).split('\n')

            d['#%d: %s' % (ndx, visalib)] = nfo

        if not paths:
            d['Binary library'] = 'Not found'

        return d

    def _init(self):
        """Load the shared library and wire up its functions."""
        try:
            lib = Library(self.library_path)
        except OSError as exc:
            raise errors.LibraryError.from_exception(exc, self.library_path)

        self.lib = lib

        # Set the argtypes, restype and errcheck for each function
        # of the visa library. Additionally store in `_functions` the
        # name of the functions.
        functions.set_signatures(self.lib, errcheck=self._return_handler)

        logger.debug('Library signatures: %d ok, %d failed',
                     len(getattr(self.lib, '_functions', [])),
                     len(getattr(self.lib, '_functions_failed', [])))

        # Set the library functions as attributes of the object.
        for method_name in getattr(self.lib, '_functions', []):
            setattr(self, method_name, getattr(self.lib, method_name))

    def _return_handler(self, ret_value, func, arguments):
        """ctypes ``errcheck`` hook: check return values for errors and
        warnings, and record the last status (globally and per session).

        :raises errors.VisaIOError: when the VISA call returned a negative
            status code.
        """
        logger.debug('%s%s -> %r',
                     func.__name__, _args_to_str(arguments), ret_value,
                     extra=self._logging_extra)

        try:
            ret_value = constants.StatusCode(ret_value)
        except ValueError:
            pass

        self._last_status = ret_value

        # The first argument of almost all registered visa functions is a session.
        # We store the error code per session
        session = None
        if func.__name__ not in ('viFindNext', ):
            try:
                session = arguments[0]
            except IndexError:
                # `arguments` is a tuple, so an empty argument list raises
                # IndexError -- the previous `except KeyError` could never
                # trigger and let the real error escape unexplained.
                raise Exception('Function %r does not seem to be a valid '
                                'visa function (len args %d)' % (func, len(arguments)))

            # Functions that use the first parameter to get a session value.
            if func.__name__ in ('viOpenDefaultRM', ):
                # noinspection PyProtectedMember
                session = session._obj.value

            if isinstance(session, integer_types):
                self._last_status_in_session[session] = ret_value
            else:
                # Functions that might or might have a session in the first argument.
                if func.__name__ not in ('viClose', 'viGetAttribute', 'viSetAttribute', 'viStatusDesc'):
                    raise Exception('Function %r does not seem to be a valid '
                                    'visa function (type args[0] %r)' % (func, type(session)))

        if ret_value < 0:
            raise errors.VisaIOError(ret_value)

        if ret_value in self.issue_warning_on:
            if session and ret_value not in self._ignore_warning_in_session[session]:
                warnings.warn(errors.VisaIOWarning(ret_value), stacklevel=2)

        return ret_value

    def list_resources(self, session, query='?*::INSTR'):
        """Returns a tuple of all connected devices matching query.

        :param query: regular expression used to match devices.
        """
        resources = []

        try:
            find_list, return_counter, instrument_description, err = self.find_resources(session, query)
        except errors.VisaIOError as e:
            if e.error_code == constants.StatusCode.error_resource_not_found:
                # No device matched: an empty result, not an error.
                return tuple()
            # Bare raise preserves the original traceback.
            raise

        resources.append(instrument_description)
        for i in range(return_counter - 1):
            resources.append(self.find_next(find_list)[0])

        self.close(find_list)

        return tuple(resources)
| |
# NOTE(review): removed two stray git merge-conflict markers ("<<<<<<< HEAD")
# that made this module unparseable; the duplicated copy of this file further
# below still needs to be reconciled manually.
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
# (keys are the _curses KEY_* codes, values the terminfo capability strings
# queried through tigetstr() below).
_capability_names = {
    _curses.KEY_A1: 'ka1',
    _curses.KEY_A3: 'ka3',
    _curses.KEY_B2: 'kb2',
    _curses.KEY_BACKSPACE: 'kbs',
    _curses.KEY_BEG: 'kbeg',
    _curses.KEY_BTAB: 'kcbt',
    _curses.KEY_C1: 'kc1',
    _curses.KEY_C3: 'kc3',
    _curses.KEY_CANCEL: 'kcan',
    _curses.KEY_CATAB: 'ktbc',
    _curses.KEY_CLEAR: 'kclr',
    _curses.KEY_CLOSE: 'kclo',
    _curses.KEY_COMMAND: 'kcmd',
    _curses.KEY_COPY: 'kcpy',
    _curses.KEY_CREATE: 'kcrt',
    _curses.KEY_CTAB: 'kctab',
    _curses.KEY_DC: 'kdch1',
    _curses.KEY_DL: 'kdl1',
    _curses.KEY_DOWN: 'kcud1',
    _curses.KEY_EIC: 'krmir',
    _curses.KEY_END: 'kend',
    _curses.KEY_ENTER: 'kent',
    _curses.KEY_EOL: 'kel',
    _curses.KEY_EOS: 'ked',
    _curses.KEY_EXIT: 'kext',
    _curses.KEY_F0: 'kf0',
    _curses.KEY_F1: 'kf1',
    _curses.KEY_F10: 'kf10',
    _curses.KEY_F11: 'kf11',
    _curses.KEY_F12: 'kf12',
    _curses.KEY_F13: 'kf13',
    _curses.KEY_F14: 'kf14',
    _curses.KEY_F15: 'kf15',
    _curses.KEY_F16: 'kf16',
    _curses.KEY_F17: 'kf17',
    _curses.KEY_F18: 'kf18',
    _curses.KEY_F19: 'kf19',
    _curses.KEY_F2: 'kf2',
    _curses.KEY_F20: 'kf20',
    _curses.KEY_F21: 'kf21',
    _curses.KEY_F22: 'kf22',
    _curses.KEY_F23: 'kf23',
    _curses.KEY_F24: 'kf24',
    _curses.KEY_F25: 'kf25',
    _curses.KEY_F26: 'kf26',
    _curses.KEY_F27: 'kf27',
    _curses.KEY_F28: 'kf28',
    _curses.KEY_F29: 'kf29',
    _curses.KEY_F3: 'kf3',
    _curses.KEY_F30: 'kf30',
    _curses.KEY_F31: 'kf31',
    _curses.KEY_F32: 'kf32',
    _curses.KEY_F33: 'kf33',
    _curses.KEY_F34: 'kf34',
    _curses.KEY_F35: 'kf35',
    _curses.KEY_F36: 'kf36',
    _curses.KEY_F37: 'kf37',
    _curses.KEY_F38: 'kf38',
    _curses.KEY_F39: 'kf39',
    _curses.KEY_F4: 'kf4',
    _curses.KEY_F40: 'kf40',
    _curses.KEY_F41: 'kf41',
    _curses.KEY_F42: 'kf42',
    _curses.KEY_F43: 'kf43',
    _curses.KEY_F44: 'kf44',
    _curses.KEY_F45: 'kf45',
    _curses.KEY_F46: 'kf46',
    _curses.KEY_F47: 'kf47',
    _curses.KEY_F48: 'kf48',
    _curses.KEY_F49: 'kf49',
    _curses.KEY_F5: 'kf5',
    _curses.KEY_F50: 'kf50',
    _curses.KEY_F51: 'kf51',
    _curses.KEY_F52: 'kf52',
    _curses.KEY_F53: 'kf53',
    _curses.KEY_F54: 'kf54',
    _curses.KEY_F55: 'kf55',
    _curses.KEY_F56: 'kf56',
    _curses.KEY_F57: 'kf57',
    _curses.KEY_F58: 'kf58',
    _curses.KEY_F59: 'kf59',
    _curses.KEY_F6: 'kf6',
    _curses.KEY_F60: 'kf60',
    _curses.KEY_F61: 'kf61',
    _curses.KEY_F62: 'kf62',
    _curses.KEY_F63: 'kf63',
    _curses.KEY_F7: 'kf7',
    _curses.KEY_F8: 'kf8',
    _curses.KEY_F9: 'kf9',
    _curses.KEY_FIND: 'kfnd',
    _curses.KEY_HELP: 'khlp',
    _curses.KEY_HOME: 'khome',
    _curses.KEY_IC: 'kich1',
    _curses.KEY_IL: 'kil1',
    _curses.KEY_LEFT: 'kcub1',
    _curses.KEY_LL: 'kll',
    _curses.KEY_MARK: 'kmrk',
    _curses.KEY_MESSAGE: 'kmsg',
    _curses.KEY_MOVE: 'kmov',
    _curses.KEY_NEXT: 'knxt',
    _curses.KEY_NPAGE: 'knp',
    _curses.KEY_OPEN: 'kopn',
    _curses.KEY_OPTIONS: 'kopt',
    _curses.KEY_PPAGE: 'kpp',
    _curses.KEY_PREVIOUS: 'kprv',
    _curses.KEY_PRINT: 'kprt',
    _curses.KEY_REDO: 'krdo',
    _curses.KEY_REFERENCE: 'kref',
    _curses.KEY_REFRESH: 'krfr',
    _curses.KEY_REPLACE: 'krpl',
    _curses.KEY_RESTART: 'krst',
    _curses.KEY_RESUME: 'kres',
    _curses.KEY_RIGHT: 'kcuf1',
    _curses.KEY_SAVE: 'ksav',
    _curses.KEY_SBEG: 'kBEG',
    _curses.KEY_SCANCEL: 'kCAN',
    _curses.KEY_SCOMMAND: 'kCMD',
    _curses.KEY_SCOPY: 'kCPY',
    _curses.KEY_SCREATE: 'kCRT',
    _curses.KEY_SDC: 'kDC',
    _curses.KEY_SDL: 'kDL',
    _curses.KEY_SELECT: 'kslt',
    _curses.KEY_SEND: 'kEND',
    _curses.KEY_SEOL: 'kEOL',
    _curses.KEY_SEXIT: 'kEXT',
    _curses.KEY_SF: 'kind',
    _curses.KEY_SFIND: 'kFND',
    _curses.KEY_SHELP: 'kHLP',
    _curses.KEY_SHOME: 'kHOM',
    _curses.KEY_SIC: 'kIC',
    _curses.KEY_SLEFT: 'kLFT',
    _curses.KEY_SMESSAGE: 'kMSG',
    _curses.KEY_SMOVE: 'kMOV',
    _curses.KEY_SNEXT: 'kNXT',
    _curses.KEY_SOPTIONS: 'kOPT',
    _curses.KEY_SPREVIOUS: 'kPRV',
    _curses.KEY_SPRINT: 'kPRT',
    _curses.KEY_SR: 'kri',
    _curses.KEY_SREDO: 'kRDO',
    _curses.KEY_SREPLACE: 'kRPL',
    _curses.KEY_SRIGHT: 'kRIT',
    _curses.KEY_SRSUME: 'kRES',
    _curses.KEY_SSAVE: 'kSAV',
    _curses.KEY_SSUSPEND: 'kSPD',
    _curses.KEY_STAB: 'khts',
    _curses.KEY_SUNDO: 'kUND',
    _curses.KEY_SUSPEND: 'kspd',
    _curses.KEY_UNDO: 'kund',
    _curses.KEY_UP: 'kcuu1'
    }
def has_key(ch):
    """Return True if the terminal description defines a key capability for *ch*.

    Args:
        ch: an integer curses keycode, or a single-character string
            (converted with ord()).

    Returns:
        bool: whether the current terminal's terminfo entry defines the
        capability corresponding to the keycode. Unknown keycodes return
        False.
    """
    if isinstance(ch, str):
        ch = ord(ch)
    # Figure out the correct capability name for the keycode.
    capability_name = _capability_names.get(ch)
    if capability_name is None:
        return False
    # Check the current terminal description for that capability;
    # bool() preserves the original truthiness test on tigetstr()'s result.
    return bool(_curses.tigetstr(capability_name))
if __name__ == '__main__':
    # Compare the output of this implementation and the ncurses has_key,
    # on platforms where has_key is already available.
    L = []
    # Initialize the screen *before* entering the try block: if initscr()
    # itself fails there is no screen for the finally clause to tear down,
    # and calling endwin() then would raise and mask the original error.
    _curses.initscr()
    try:
        for key in _capability_names.keys():
            system = _curses.has_key(key)
            python = has_key(key)
            if system != python:
                L.append('Mismatch for key %s, system=%i, Python=%i'
                         % (_curses.keyname(key), system, python))
    finally:
        _curses.endwin()
    for i in L:
        print(i)
=======
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
_capability_names = {
_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
# Figure out the correct capability name for the keycode.
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
#Check the current terminal description for that capability;
#if present, return true, else return false.
if _curses.tigetstr( capability_name ):
return True
else:
return False
if __name__ == '__main__':
# Compare the output of this implementation and the ncurses has_key,
# on platforms where has_key is already available
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
% (_curses.keyname( key ), system, python) )
finally:
_curses.endwin()
for i in L: print(i)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
_capability_names = {
_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
# Figure out the correct capability name for the keycode.
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
#Check the current terminal description for that capability;
#if present, return true, else return false.
if _curses.tigetstr( capability_name ):
return True
else:
return False
if __name__ == '__main__':
# Compare the output of this implementation and the ncurses has_key,
# on platforms where has_key is already available
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
% (_curses.keyname( key ), system, python) )
finally:
_curses.endwin()
for i in L: print(i)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| |
"""
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, University of California, Berkeley
# All rights reserved.
# Authors: Cameron Lee (cameronlee@berkeley.edu) and Dmitry Berenson
# (berenson@eecs.berkeley.edu)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of California, Berkeley nor the names
#   of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
This file contains several wrapper classes for planning paths, performing
collision checking, retrieving paths, and drawing points in RViz.
None of these classes really perform actualy work but rather are just calling
ROS services.
"""
import roslib
import rospy
import threading
import sys
from lightning.msg import Float64Array, Float64Array2D, DrawPoints
from lightning.srv import CollisionCheck, CollisionCheckRequest, PathShortcut, PathShortcutRequest
from moveit_msgs.srv import GetMotionPlan, GetMotionPlanRequest
from moveit_msgs.msg import JointConstraint, Constraints
# Names of Topics/Services to be advertised/used by these wrappers.
# The name of the collision checking service.
COLLISION_CHECK = "collision_check"
# The name of the path shortcutting service.
SHORTCUT_PATH_NAME = "shortcut_path"
# Topic to publish to for drawing points in RViz.
DISPLAY_POINTS = "draw_points"
# Name of the planner_stoppable services advertised from various lightning nodes.
# Used as the service-name suffix when PlanTrajectoryWrapper builds the
# per-node service names ("<type>_planner_node<i>/<PLANNER_NAME>").
PLANNER_NAME = "plan_kinematic_path"
class PlanTrajectoryWrapper:
"""
This wrapper class handles calling the GetMotionPlan service of
planner_stoppable type nodes, handling keeping track of multiple
planners (for multi-threading), constructing the service requests and
extracting the useful information from the response.
"""
def __init__(self, node_type, num_planners=1):
"""
Constructor for PlanTrajectoryWrapper.
Args:
node_type (string): The type of planner that this is being used by,
generally "pfs" or "rr".
num_planners (int): The number of planner nodes that are being used.
"""
self.planners = ["%s_planner_node%i/%s" % (node_type, i, PLANNER_NAME) for i in xrange(num_planners)]
rospy.loginfo("Initializaing %i planners for %s" % (num_planners, node_type))
self.planners_available = [True for i in xrange(num_planners)]
self.planner_lock = threading.Lock()
self.released_event = threading.Event()
self.released_event.set()
def acquire_planner(self):
"""
Acquires a planner lock so that plan_trajectory() can be called.
This must be called before calling plan_trajectory().
Returns:
int: The index of the planner whose lock was acquired.
This is only really relevant if multiple planners are being used
and is the number that should be passed as the planner_number
to plan_trajectory().
"""
planner_number = self._wait_for_planner()
while planner_number == -1:
self.released_event.wait()
planner_number = self._wait_for_planner()
return planner_number
def release_planner(self, index):
"""
Releases the planner lock that you acquired so that plan_trajectory()
can be called on that planner by someone else.
This should be called after you are done calling plan_trajectory().
"""
self.planner_lock.acquire()
self.planners_available[index] = True
self.released_event.set()
self.planner_lock.release()
def _wait_for_planner(self):
"""
Waits for at least one planner lock to release so that it can be
acquired.
"""
self.planner_lock.acquire()
acquired_planner = -1
for i, val in enumerate(self.planners_available):
if val:
self.planners_available[i] = False
if not any(self.planners_available):
self.released_event.clear()
acquired_planner = i
break
self.planner_lock.release()
return acquired_planner
#planner to get new trajectory from start_point to goal_point
#planner_number is the number received from acquire_planner
def plan_trajectory(self, start_point, goal_point, planner_number, joint_names, group_name, planning_time, planner_config_name):
"""
Given a start and goal point, returns the planned path.
Args:
start_point (list of float): A starting joint configuration.
goal_point (list of float): A goal joint configuration.
planner_number (int): The index of the planner to be used as
returned by acquire_planner().
joint_names (list of str): The name of the joints corresponding to
start_point and goal_point.
group_name (str): The name of the group to which the joint names
correspond.
planning_time (float): Maximum allowed time for planning, in seconds.
planner_config_name (str): Type of planner to use.
Return:
list of list of float: A sequence of points representing the joint
configurations at each point on the path.
"""
planner_client = rospy.ServiceProxy(self.planners[planner_number], GetMotionPlan)
rospy.loginfo("Plan Trajectory Wrapper: got a plan_trajectory request for %s with start = %s and goal = %s" % (self.planners[planner_number], start_point, goal_point))
# Put together the service request.
req = GetMotionPlanRequest()
req.motion_plan_request.workspace_parameters.header.stamp = rospy.get_rostime()
req.motion_plan_request.group_name = group_name
req.motion_plan_request.num_planning_attempts = 1
req.motion_plan_request.allowed_planning_time = planning_time
req.motion_plan_request.planner_id = planner_config_name #using RRT planner by default
req.motion_plan_request.start_state.joint_state.header.stamp = rospy.get_rostime()
req.motion_plan_request.start_state.joint_state.name = joint_names
req.motion_plan_request.start_state.joint_state.position = start_point
req.motion_plan_request.goal_constraints.append(Constraints())
req.motion_plan_request.goal_constraints[0].joint_constraints = []
for i in xrange(len(joint_names)):
temp_constraint = JointConstraint()
temp_constraint.joint_name = joint_names[i]
temp_constraint.position = goal_point[i]
temp_constraint.tolerance_above = 0.05;
temp_constraint.tolerance_below = 0.05;
req.motion_plan_request.goal_constraints[0].joint_constraints.append(temp_constraint)
#call the planner
rospy.wait_for_service(self.planners[planner_number])
rospy.loginfo("Plan Trajectory Wrapper: sent request to service %s" % planner_client.resolved_name)
try:
response = planner_client(req)
except rospy.ServiceException, e:
rospy.loginfo("Plan Trajectory Wrapper: service call failed: %s"%e)
return None
# Pull a list of joint positions out of the returned plan.
rospy.loginfo("Plan Trajectory Wrapper: %s returned" % (self.planners[planner_number]))
if response.motion_plan_response.error_code.val == response.motion_plan_response.error_code.SUCCESS:
return [pt.positions for pt in response.motion_plan_response.trajectory.joint_trajectory.points]
else:
rospy.loginfo("Plan Trajectory Wrapper: service call to %s was unsuccessful" % planner_client.resolved_name)
return None
class ShortcutPathWrapper:
    """
    This is a very thin wrapper over the path shortcutting service.
    """

    def shortcut_path(self, original_path, group_name):
        """
        Shortcuts a path, where the path is for a given group name.

        Args:
            original_path (list of list of float): The path, represented by
                a list of individual joint configurations.
            group_name (str): The group for which the path was created.

        Return:
            list of list of float: The shortcutted version of the path.
        """
        # Build the request from the raw joint configurations.
        request = PathShortcutRequest()
        request.path = [Float64Array(waypoint) for waypoint in original_path]
        request.group_name = group_name
        # Block until the shortcutting service is up, then call it.
        shortcut_client = rospy.ServiceProxy(SHORTCUT_PATH_NAME, PathShortcut)
        rospy.wait_for_service(SHORTCUT_PATH_NAME)
        result = shortcut_client(request)
        return [waypoint.values for waypoint in result.new_path]
class InvalidSectionWrapper:
    """
    This is a very thin wrapper over the collision checking service.
    """

    def get_invalid_sections_for_path(self, original_path, group_name):
        """
        Returns the invalid sections for a single path.

        Args:
            original_path (list of list of float): The path to collision check,
                represented by a list of individual joint configurations.
            group_name (str): The joint group for which the path was created.

        Return:
            list of pairs of indices, where each index in a pair is the start
            and end of an invalid section, or None if the service returned
            no sections at all.
        """
        section = self.get_invalid_sections_for_paths([original_path], group_name)
        if len(section) > 0:
            return section[0]
        else:
            return None

    def get_invalid_sections_for_paths(self, orig_paths, group_name):
        """
        Returns the invalid sections for a set of paths.

        Args:
            orig_paths (list of paths): The paths to collision check,
                represented by a list of individual joint configurations.
            group_name (str): The joint group for which the paths were created.

        Return:
            list of list of pairs of indices, where each index in a pair is the
            start and end of an invalid section.
        """
        collision_check_client = rospy.ServiceProxy(COLLISION_CHECK, CollisionCheck)
        cc_req = CollisionCheckRequest()
        cc_req.paths = [Float64Array2D([Float64Array(point) for point in path]) for path in orig_paths]
        cc_req.group_name = group_name
        # Fix: log line previously mislabeled this as "Plan Trajectory Wrapper".
        rospy.loginfo("Invalid Section Wrapper: sending request to collision checker")
        rospy.wait_for_service(COLLISION_CHECK)
        response = collision_check_client(cc_req)
        return [[sec.values for sec in individualPathSections.points] for individualPathSections in response.invalid_sections]
class DrawPointsWrapper:
    """
    Wrapper to draw all the points for a path in RVIz.
    The points are drawn on the DISPLAY_POINTS topic, which is subscribed to
    by PointDrawer.py. This class is used when running tests.
    """
    #point colors
    WHITE = (1.0, 1.0, 1.0)
    BLACK = (0.0, 0.0, 0.0)
    RED = (1.0, 0.0, 0.0)
    GREEN = (0.0, 1.0, 0.0)
    BLUE = (0.0, 0.0, 1.0)
    MAGENTA = (1.0, 0.0, 1.0)
    YELLOW = (1.0, 1.0, 0.0)
    GREENBLUE = (0.0, 1.0, 1.0)
    #point types
    ANGLES = "angles"
    POSES = "poses"

    def __init__(self):
        # Publisher for outgoing DrawPoints messages.
        self.display_points_publisher = rospy.Publisher(DISPLAY_POINTS, DrawPoints, queue_size=10)

    def draw_points(self, path, model_group_name, point_group_name, point_type, rgb, display_density, point_radius=0.03):
        """
        Draws the points of a given path in RViz.

        Args:
            path (list of list of float): The path to draw.
            model_group_name (str): The name of the joint group in question.
                For the PR2 arms, this would be "right_arm" or "left_arm".
            point_group_name (str): The namespace under which the points will
                show up in RViz.
            point_type (str): Type of point, ANGLES or POSES.
            rgb (tuple of float): Color of points being drawn. Some colors are
                pre-defined as members of this class.
            display_density (float): The fraction of the path to be displayed.
                For instance, if display_density = 0.25, one in four points will
                be shown.
            point_radius (float): Size of the individual points to be drawn.
        """
        msg = DrawPoints()
        msg.points = [Float64Array(config) for config in path]
        msg.model_group_name = model_group_name
        msg.point_group_name = point_group_name
        if point_type == DrawPointsWrapper.ANGLES:
            msg.point_type = msg.POINT_TYPE_ANGLES
        else:
            msg.point_type = msg.POINT_TYPE_POSES
        msg.display_density = display_density
        msg.red, msg.green, msg.blue = rgb
        msg.action = msg.ACTION_ADD
        msg.point_radius = point_radius
        self.display_points_publisher.publish(msg)

    def clear_points(self):
        """
        Clears all of the points from the display.
        """
        clear_msg = DrawPoints()
        clear_msg.action = clear_msg.ACTION_CLEAR
        self.display_points_publisher.publish(clear_msg)
if __name__ == "__main__":
if len(sys.argv) == 8:
isw = InvalidSectionWrapper()
path = [float(sys.argv[i]) for i in xrange(1, len(sys.argv))]
print isw.get_invalid_sections_for_path([path])
| |
"""Alexa configuration for Home Assistant Cloud."""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from hass_nabucasa import Cloud, cloud_api
from homeassistant.components.alexa import (
DOMAIN as ALEXA_DOMAIN,
config as alexa_config,
entities as alexa_entities,
errors as alexa_errors,
state_report as alexa_state_report,
)
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES, HTTP_BAD_REQUEST
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.helpers import entity_registry, start
from homeassistant.helpers.event import async_call_later
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from .const import CONF_ENTITY_CONFIG, CONF_FILTER, PREF_SHOULD_EXPOSE, RequireRelink
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
# Time to wait when entity preferences have changed before syncing it to
# the cloud.
SYNC_DELAY = 1
class AlexaConfig(alexa_config.AbstractConfig):
    """Alexa Configuration."""
    def __init__(
        self,
        hass: HomeAssistant,
        config: dict,
        cloud_user: str,
        prefs: CloudPreferences,
        cloud: Cloud,
    ) -> None:
        """Initialize the Alexa config."""
        super().__init__(hass)
        self._config = config
        self._cloud_user = cloud_user
        self._prefs = prefs
        self._cloud = cloud
        # Access token for the Alexa event gateway and its expiry time;
        # both are fetched lazily in async_get_access_token().
        self._token = None
        self._token_valid = None
        # Snapshots of the prefs last synced to Alexa; compared by identity
        # in _async_prefs_updated() to detect changes.
        self._cur_entity_prefs = prefs.alexa_entity_configs
        self._cur_default_expose = prefs.alexa_default_expose
        # Cancel callback for a pending delayed sync (see SYNC_DELAY).
        self._alexa_sync_unsub = None
        self._endpoint = None
        prefs.async_listen_updates(self._async_prefs_updated)
        hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
            self._handle_entity_registry_updated,
        )
    @property
    def enabled(self):
        """Return if Alexa is enabled."""
        # Requires an active cloud login, a live subscription, and the
        # user preference toggle.
        return (
            self._cloud.is_logged_in
            and not self._cloud.subscription_expired
            and self._prefs.alexa_enabled
        )
    @property
    def supports_auth(self):
        """Return if config supports auth."""
        return True
    @property
    def should_report_state(self):
        """Return if states should be proactively reported."""
        return self._prefs.alexa_report_state
    @property
    def endpoint(self):
        """Endpoint for report state."""
        # Set as a side effect of async_get_access_token().
        if self._endpoint is None:
            raise ValueError("No endpoint available. Fetch access token first")
        return self._endpoint
    @property
    def locale(self):
        """Return config locale."""
        # Not clear how to determine locale atm.
        return "en-US"
    @property
    def entity_config(self):
        """Return entity config."""
        return self._config.get(CONF_ENTITY_CONFIG) or {}
    @callback
    def user_identifier(self):
        """Return an identifier for the user that represents this config."""
        return self._cloud_user
    async def async_initialize(self):
        """Initialize the Alexa config."""
        async def hass_started(hass):
            # Lazily set up the Alexa integration once HA has started,
            # but only when cloud Alexa is actually enabled.
            if self.enabled and ALEXA_DOMAIN not in self.hass.config.components:
                await async_setup_component(self.hass, ALEXA_DOMAIN, {})
        start.async_at_start(self.hass, hass_started)
    def should_expose(self, entity_id):
        """If an entity should be exposed."""
        if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            return False
        # A non-empty YAML filter takes precedence over per-entity prefs.
        if not self._config[CONF_FILTER].empty_filter:
            return self._config[CONF_FILTER](entity_id)
        entity_configs = self._prefs.alexa_entity_configs
        entity_config = entity_configs.get(entity_id, {})
        entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
        if entity_expose is not None:
            return entity_expose
        default_expose = self._prefs.alexa_default_expose
        # Backwards compat
        if default_expose is None:
            return True
        # Expose when the entity's domain is in the default-expose list.
        return split_entity_id(entity_id)[0] in default_expose
    @callback
    def async_invalidate_access_token(self):
        """Invalidate access token."""
        self._token_valid = None
    async def async_get_access_token(self):
        """Get an access token."""
        # Reuse the cached token while it is still valid.
        if self._token_valid is not None and self._token_valid > utcnow():
            return self._token
        resp = await cloud_api.async_alexa_access_token(self._cloud)
        body = await resp.json()
        if resp.status == HTTP_BAD_REQUEST:
            if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
                # The Alexa skill link is broken: disable state reporting
                # and tell the user to re-link via the Alexa app.
                if self.should_report_state:
                    await self._prefs.async_update(alexa_report_state=False)
                    self.hass.components.persistent_notification.async_create(
                        f"There was an error reporting state to Alexa ({body['reason']}). "
                        "Please re-link your Alexa skill via the Alexa app to "
                        "continue using it.",
                        "Alexa state reporting disabled",
                        "cloud_alexa_report",
                    )
                raise RequireRelink
            raise alexa_errors.NoTokenAvailable
        # Cache the token, the event endpoint, and the expiry time.
        self._token = body["access_token"]
        self._endpoint = body["event_endpoint"]
        self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
        return self._token
    async def _async_prefs_updated(self, prefs):
        """Handle updated preferences."""
        if ALEXA_DOMAIN not in self.hass.config.components and self.enabled:
            await async_setup_component(self.hass, ALEXA_DOMAIN, {})
        if self.should_report_state != self.is_reporting_states:
            if self.should_report_state:
                await self.async_enable_proactive_mode()
            else:
                await self.async_disable_proactive_mode()
            # State reporting is reported as a property on entities.
            # So when we change it, we need to sync all entities.
            await self.async_sync_entities()
            return
        # If user has filter in config.yaml, don't sync.
        if not self._config[CONF_FILTER].empty_filter:
            return
        # If entity prefs are the same, don't sync.
        # NOTE: identity ("is") comparison against the cached snapshots.
        if (
            self._cur_entity_prefs is prefs.alexa_entity_configs
            and self._cur_default_expose is prefs.alexa_default_expose
        ):
            return
        # Cancel any sync that is already scheduled.
        if self._alexa_sync_unsub:
            self._alexa_sync_unsub()
            self._alexa_sync_unsub = None
        # A default-expose change affects every entity: sync immediately.
        if self._cur_default_expose is not prefs.alexa_default_expose:
            await self.async_sync_entities()
            return
        # Otherwise debounce: sync the changed entity prefs after SYNC_DELAY.
        self._alexa_sync_unsub = async_call_later(
            self.hass, SYNC_DELAY, self._sync_prefs
        )
    async def _sync_prefs(self, _now):
        """Sync the updated preferences to Alexa."""
        self._alexa_sync_unsub = None
        old_prefs = self._cur_entity_prefs
        new_prefs = self._prefs.alexa_entity_configs
        seen = set()
        to_update = []
        to_remove = []
        # Diff old against new prefs: only changed expose flags trigger work.
        for entity_id, info in old_prefs.items():
            seen.add(entity_id)
            old_expose = info.get(PREF_SHOULD_EXPOSE)
            if entity_id in new_prefs:
                new_expose = new_prefs[entity_id].get(PREF_SHOULD_EXPOSE)
            else:
                new_expose = None
            if old_expose == new_expose:
                continue
            if new_expose:
                to_update.append(entity_id)
            else:
                to_remove.append(entity_id)
        # Now all the ones that are in new prefs but never were in old prefs
        for entity_id, info in new_prefs.items():
            if entity_id in seen:
                continue
            new_expose = info.get(PREF_SHOULD_EXPOSE)
            if new_expose is None:
                continue
            # Only test if we should expose. It can never be a remove action,
            # as it didn't exist in old prefs object.
            if new_expose:
                to_update.append(entity_id)
        # We only set the prefs when update is successful, that way we will
        # retry when next change comes in.
        if await self._sync_helper(to_update, to_remove):
            self._cur_entity_prefs = new_prefs
    async def async_sync_entities(self):
        """Sync all entities to Alexa."""
        # Remove any pending sync
        if self._alexa_sync_unsub:
            self._alexa_sync_unsub()
            self._alexa_sync_unsub = None
        to_update = []
        to_remove = []
        # Partition all known entities by the current expose decision.
        for entity in alexa_entities.async_get_entities(self.hass, self):
            if self.should_expose(entity.entity_id):
                to_update.append(entity.entity_id)
            else:
                to_remove.append(entity.entity_id)
        return await self._sync_helper(to_update, to_remove)
    async def _sync_helper(self, to_update, to_remove) -> bool:
        """Sync entities to Alexa.
        Return boolean if it was successful.
        """
        if not to_update and not to_remove:
            return True
        # Make sure it's valid.
        await self.async_get_access_token()
        tasks = []
        if to_update:
            tasks.append(
                alexa_state_report.async_send_add_or_update_message(
                    self.hass, self, to_update
                )
            )
        if to_remove:
            tasks.append(
                alexa_state_report.async_send_delete_message(self.hass, self, to_remove)
            )
        # Bound the whole sync at 10 seconds; failures are logged, not raised.
        try:
            with async_timeout.timeout(10):
                await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
            return True
        except asyncio.TimeoutError:
            _LOGGER.warning("Timeout trying to sync entities to Alexa")
            return False
        except aiohttp.ClientError as err:
            _LOGGER.warning("Error trying to sync entities to Alexa: %s", err)
            return False
    async def _handle_entity_registry_updated(self, event):
        """Handle when entity registry updated."""
        if not self.enabled or not self._cloud.is_logged_in:
            return
        entity_id = event.data["entity_id"]
        if not self.should_expose(entity_id):
            return
        action = event.data["action"]
        to_update = []
        to_remove = []
        # Map the registry action onto an Alexa add/update or delete.
        if action == "create":
            to_update.append(entity_id)
        elif action == "remove":
            to_remove.append(entity_id)
        elif action == "update" and bool(
            set(event.data["changes"]) & entity_registry.ENTITY_DESCRIBING_ATTRIBUTES
        ):
            to_update.append(entity_id)
            # A renamed entity must be removed under its old id.
            if "old_entity_id" in event.data:
                to_remove.append(event.data["old_entity_id"])
        # Missing-token failures are expected when not linked; ignore them.
        with suppress(alexa_errors.NoTokenAvailable):
            await self._sync_helper(to_update, to_remove)
| |
from .constants import MILLI_MICROS,SECOND_MICROS,MINUTE_MICROS,HOUR_MICROS,MEAN_DAY_MICROS,MEAN_WEEK_MICROS,MEAN_MONTH_MICROS,MEAN_YEAR_MICROS,HALF_MILLI_MICROS,HALF_SECOND_MICROS,HALF_MINUTE_MICROS,HALF_HOUR_MICROS,HALF_MEAN_DAY_MICROS,HALF_MEAN_WEEK_MICROS,HALF_MEAN_MONTH_MICROS,HALF_MEAN_YEAR_MICROS
import time
# Unit-alias table: each row pairs the accepted keyword spellings with the
# number of microseconds in one unit of that kind.
TRANSLATIONS = (
    (('my', 'mean_years'), MEAN_YEAR_MICROS),
    (('mm', 'mean_months'), MEAN_MONTH_MICROS),
    (('mw', 'mean_weeks'), MEAN_WEEK_MICROS),
    (('md', 'mean_days'), MEAN_DAY_MICROS),
    (('h', 'hours'), HOUR_MICROS),
    (('m', 'mins', 'minutes'), MINUTE_MICROS),
    (('s', 'secs', 'seconds'), SECOND_MICROS),
    (('ms', 'millis', 'milliseconds'), MILLI_MICROS),
    (('us', 'micros', 'microseconds'), 1))
# Flattened lookup: alias -> microsecond multiplier.
TRANSLATION_HASH = dict((alias, micros)
                        for aliases, micros in TRANSLATIONS
                        for alias in aliases)
class SaneDelta(object):
    """A duration stored as an integer microsecond count (``self.us``).

    Construct from a single positional microsecond count, or from unit
    keywords (any alias in TRANSLATION_HASH, e.g. ``hours=2, ms=5``).
    NOTE(review): Python 2 code -- relies on ``dict.iteritems``, ``cmp``/
    ``__cmp__``, ``long``, ``unicode`` and truncating integer ``/``
    division throughout; do not run under Python 3 unmodified.
    """
    def __init__(self, *args, **kwargs):
        if args:
            self.us = int(args[0])
        else:
            # None keyword values count as 0 so optional parts may be passed.
            self.us = sum(TRANSLATION_HASH[k]*(v or 0) for k,v in kwargs.iteritems())
    # rounded amounts: add half a unit then integer-divide -> round-half-up
    @property
    def rounded_microseconds(self): return self.us
    @property
    def rounded_milliseconds(self): return (self.us + HALF_MILLI_MICROS) / MILLI_MICROS
    @property
    def rounded_seconds(self): return (self.us + HALF_SECOND_MICROS) / SECOND_MICROS
    @property
    def rounded_minutes(self): return (self.us + HALF_MINUTE_MICROS) / MINUTE_MICROS
    @property
    def rounded_hours(self): return (self.us + HALF_HOUR_MICROS) / HOUR_MICROS
    @property
    def rounded_mean_days(self): return (self.us + HALF_MEAN_DAY_MICROS) / MEAN_DAY_MICROS
    @property
    def rounded_mean_weeks(self): return (self.us + HALF_MEAN_WEEK_MICROS) / MEAN_WEEK_MICROS
    @property
    def rounded_mean_months(self): return (self.us + HALF_MEAN_MONTH_MICROS) / MEAN_MONTH_MICROS
    @property
    def rounded_mean_years(self): return (self.us + HALF_MEAN_YEAR_MICROS) / MEAN_YEAR_MICROS
    # aliases
    rus = rounded_micros = rounded_microseconds
    rms = rounded_millis = rounded_milliseconds
    rs = rounded_secs = rounded_seconds
    rm = rounded_mins = rounded_minutes
    rh = rounded_hours
    rmd = rounded_mean_days
    rmw = rounded_mean_weeks
    rmm = rounded_mean_months
    rmy = rounded_mean_years
    # rounded amounts are the default (unprefixed) aliases
    micros = microseconds = rus
    ms = millis = milliseconds = rms
    s = secs = seconds = rs
    m = mins = minutes = rm
    h = hours = rh
    md = mean_days = rmd
    mw = mean_weeks = rmw
    mm = mean_months = rmm
    my = mean_years = rmy
    # unrounded (truncated) amounts
    @property
    def whole_microseconds(self): return self.us
    @property
    def whole_milliseconds(self): return self.us / MILLI_MICROS
    @property
    def whole_seconds(self): return self.us / SECOND_MICROS
    @property
    def whole_minutes(self): return self.us / MINUTE_MICROS
    @property
    def whole_hours(self): return self.us / HOUR_MICROS
    @property
    def whole_mean_days(self): return self.us / MEAN_DAY_MICROS
    @property
    def whole_mean_weeks(self): return self.us / MEAN_WEEK_MICROS
    @property
    def whole_mean_months(self): return self.us / MEAN_MONTH_MICROS
    @property
    def whole_mean_years(self): return self.us / MEAN_YEAR_MICROS
    # aliases
    wus = whole_micros = whole_microseconds
    wms = whole_millis = whole_milliseconds
    ws = whole_secs = whole_seconds
    wm = whole_mins = whole_minutes
    wh = whole_hours
    wmd = whole_mean_days
    wmw = whole_mean_weeks
    wmm = whole_mean_months
    wmy = whole_mean_years
    # float amounts
    @property
    def float_microseconds(self): return float(self.us)
    @property
    def float_milliseconds(self): return float(self.us) / MILLI_MICROS
    @property
    def float_seconds(self): return float(self.us) / SECOND_MICROS
    @property
    def float_minutes(self): return float(self.us)/ MINUTE_MICROS
    @property
    def float_hours(self): return float(self.us) / HOUR_MICROS
    @property
    def float_mean_days(self): return float(self.us) / MEAN_DAY_MICROS
    @property
    def float_mean_weeks(self): return float(self.us) / MEAN_WEEK_MICROS
    @property
    def float_mean_months(self): return float(self.us) / MEAN_MONTH_MICROS
    @property
    def float_mean_years(self): return float(self.us) / MEAN_YEAR_MICROS
    # aliases
    fus = float_micros = float_microseconds
    fms = float_millis = float_milliseconds
    fs = float_secs = float_seconds
    fm = float_mins = float_minutes
    fh = float_hours
    fmd = float_mean_days
    fmw = float_mean_weeks
    fmm = float_mean_months
    fmy = float_mean_years
    # positional amounts: the remainder within the next larger unit
    # (e.g. positional_minutes is 0..59)
    @property
    def positional_microseconds(self): return self.us % SECOND_MICROS
    @property
    def positional_milliseconds(self): return self.us % SECOND_MICROS / MILLI_MICROS
    @property
    def positional_seconds(self): return self.us % MINUTE_MICROS / SECOND_MICROS
    @property
    def positional_minutes(self): return self.us % HOUR_MICROS / MINUTE_MICROS
    @property
    def positional_hours(self): return self.us % MEAN_DAY_MICROS / HOUR_MICROS
    #aliases
    pus = positional_micros = positional_microseconds
    pms = positional_millis = positional_milliseconds
    ps = positional_secs = positional_seconds
    pm = positional_mins = positional_minutes
    ph = positional_hours
    # positional rounded amounts (round-half-up within the position)
    @property
    def positional_rounded_microseconds(self): return self.us % SECOND_MICROS
    @property
    def positional_rounded_milliseconds(self): return (self.us % SECOND_MICROS + HALF_MILLI_MICROS) / MILLI_MICROS
    @property
    def positional_rounded_seconds(self): return (self.us % MINUTE_MICROS + HALF_SECOND_MICROS) / SECOND_MICROS
    @property
    def positional_rounded_minutes(self): return (self.us % HOUR_MICROS + HALF_MINUTE_MICROS) / MINUTE_MICROS
    @property
    def positional_rounded_hours(self): return (self.us % MEAN_DAY_MICROS + HALF_HOUR_MICROS) / HOUR_MICROS
    #aliases
    prus = positional_rounded_micros = positional_rounded_microseconds
    prms = positional_rounded_millis = positional_rounded_milliseconds
    prs = positional_rounded_secs = positional_rounded_seconds
    prm = positional_rounded_mins = positional_rounded_minutes
    prh = positional_rounded_hours
    def clone(self): return SaneDelta(self.us)  # independent copy
    def __cmp__(self, other): return cmp(self.us, int(other))  # py2-only ordering hook
    def __hash__(self): return hash(self.us)
    def __int__(self): return self.us
    def __long__(self): return long(self.us)  # py2-only
    def __add__(self, operand): return SaneDelta(self.us + int(operand))
    def __sub__(self, operand): return SaneDelta(self.us - int(operand))
    def __mul__(self, operand): return SaneDelta(self.us * int(operand))
    def __div__(self, operand): return SaneDelta(self.us / int(operand))  # py2 "/" operator
    def __rmul__(self, operand): return int(operand) * self.us
    def __rdiv__(self, operand): return int(operand) / self.us
    def __neg__(self): return SaneDelta(-self.us)
    def __pos__(self): return SaneDelta(+self.us)
    def __abs__(self): return SaneDelta(abs(self.us))
    def __repr__(self): return 'SaneDelta(%s)'%self.us
    def __str__(self): return unicode(self).encode('utf-8')  # py2: bytes of the unicode form
    def __unicode__(self): return self.construct_str()
    @property
    def abbr(self): return self.construct_str(max_positions=2, final_position='s', separator='', no_zero_positions=True)
    #TODO: test this sucker
    #TODO; test negative deltas
    def construct_str(self, max_positions=None, final_position='us', separator=' ', no_zero_positions=False):
        """Render the delta as e.g. ``'3d 4h 5m 6.000007s'``.

        max_positions caps how many positions (days..fractional seconds)
        are emitted, rounding the finest shown position; final_position
        forces the finest unit ('md'/'h'/'m'/'s'/'ms'/'us'); separator
        joins positions; no_zero_positions drops trailing zero positions.
        Negative deltas are rendered from abs(self) with a leading '-'.
        NOTE(review): flagged untested above -- verify edge cases.
        """
        parts = []
        delta = abs(self)
        max_positions = max_positions or 6
        if final_position == 'md' or len(parts)==max_positions-1 and delta.wmd:
            parts.append((delta.rmd,"%sd"%delta.rmd))
        else:
            if delta.wmd: parts.append((delta.wmd,"%sd"%delta.wmd))
            if final_position == 'h' or len(parts)==max_positions-1 and (delta.ph or len(parts)):
                parts.append((delta.prh,"%sh"%delta.prh))
            else:
                if delta.ph or len(parts): parts.append((delta.ph,"%sh"%delta.ph))
                if final_position == 'm' or len(parts)==max_positions-1 and (delta.pm or len(parts)):
                    parts.append((delta.prm,"%sm"%delta.prm))
                else:
                    if delta.pm or len(parts): parts.append((delta.pm,"%sm"%delta.pm))
                    if final_position == 's' or len(parts)==max_positions-1 and (delta.ps or len(parts)):
                        parts.append((delta.prs,"%ss"%delta.prs))
                    else:
                        parts.append((delta.ps,'%s'%delta.ps))
                        # fractional part: 3-tuple marks "join without separator"
                        if final_position == 'ms' or len(parts)==max_positions and (delta.pms or len(parts)):
                            parts.append((delta.prms,".%03ds"%delta.prms,True))
                        else:
                            parts.append((delta.pus,".%06ds"%delta.pus,True))
        while no_zero_positions and len(parts)>1 and not parts[-1][0]: parts.pop()
        return ("%s%s%s%s" % ('' if self>=0 else '-', separator.join([p[1] for p in parts[:-1]]), '' if len(parts[-1])==3 else separator, parts[-1][1])).strip()
    def sleep(self): return time.sleep(self.float_seconds)
    #TODO: implement
    #def ago(self):
        #"""
        #Get a datetime object or a int() Epoch timestamp and return a
        #pretty string like 'an hour ago', 'Yesterday', '3 months ago',
        #'just now', etc
        #copied from http://stackoverflow.com/questions/1551382/python-user-friendly-time-format
        #and then tweaked
        #"""
        #micro_delta = SaneTime().us - self.us
        #second_delta = (micro_delta+500*1000)/1000**2
        #day_delta = (micro_delta+1000**2*60**2*12)/(1000**2*60**2*24)
        #if micro_delta < 0:
            ## TODO: implement future times
            #return ''
        #if day_delta == 0:
            #if second_delta < 10:
                #return "just now"
            #if second_delta < 30:
                #return "%s seconds ago" % second_delta
            #if second_delta < 90:
                #return "a minute ago"
            #if second_delta < 30*60:
                #return "%s minutes ago" % ((second_delta+30)/60)
            #if second_delta < 90*60:
                #return "an hour ago"
            #return "%s hours ago" % ((second_delta+30*60)/60**2)
        #if day_delta < 2:
            #return "yesterday"
        #if day_delta < 7:
            #return "%s days ago" % day_delta
        #if day_delta < 11:
            #return "a week ago" % day_delta
        #if day_delta < 45:
            #return "%s weeks ago" % ((day_delta+3)/7)
        #if day_delta < 400:
            #return "%s months ago" % ((day_delta+15)/30)
        #return "%s years ago" % ((day_delta+182)/365)
def nsanedelta(*args, **kwargs):
    """None-tolerant SaneDelta factory.

    Returns None when the first positional argument is None, or when the
    call is keyword-only and every keyword value is None; otherwise builds
    a SaneDelta exactly like the class constructor would.
    """
    if args and args[0] is None:
        return None
    if not args and kwargs and set(kwargs.values()) == set([None]):
        return None
    return SaneDelta(*args, **kwargs)
#aliases:
delta = sanedelta = SaneDelta   # convenience aliases for the main class
ndelta = nsanedelta             # None-tolerant factory alias
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import fnmatch
import itertools
import re
import os
import shutil
import tempfile
from conary.build import cook, lookaside, grouprecipe, recipe, use
from conary.build.cook import signAbsoluteChangeset
from conary.conaryclient import cmdline
from conary.deps import deps
from conary.lib import log, magic, util
from conary.repository import trovesource
from conary import checkin
from conary import errors as conaryerrors
from conary import state
from conary import versions
from rmake import errors
from rmake import compat
from rmake.cmdline import cmdutil
from rmake.lib import recipeutil
from rmake.build import buildjob, buildtrove
# How group troves in the build list are expanded (cfg.recurseGroups).
BUILD_RECURSE_GROUPS_NONE = 0 # don't recurse groups, build the group only
BUILD_RECURSE_GROUPS_BINARY = 1 # find and recurse the binary version of the
                                # group
BUILD_RECURSE_GROUPS_SOURCE = 2 # find and recurse the source version of the
                                # group
def getBuildJob(buildConfig, conaryclient, troveSpecList,
                message=None, recurseGroups=BUILD_RECURSE_GROUPS_NONE,
                configDict=None, oldTroveDict=None, updateSpecs=None,
                rebuild=False):
    """Assemble a BuildJob from the requested trove specs.

    Trove specs are grouped by their build context; each context gets its
    own (deep-copied) config, its troves are resolved via getTrovesToBuild,
    and a BuildTrove is added to the job for every resolved trove.
    NOTE(review): Python 2 code (dict.iteritems).
    """
    # Bucket the specs by context; a 3-tuple spec has no context ('').
    trovesByContext = {}
    for troveSpec in list(troveSpecList):
        if not isinstance(troveSpec, tuple):
            troveSpec = cmdutil.parseTroveSpec(troveSpec)
        if len(troveSpec) == 3:
            context = ''
        else:
            context = troveSpec[3]
            troveSpec = troveSpec[:3]
        if troveSpec[2] is None:
            # normalize a missing flavor to the empty flavor
            troveSpec = (troveSpec[0], troveSpec[1], deps.parseFlavor(''))
        trovesByContext.setdefault(context, []).append(troveSpec)
    job = buildjob.BuildJob()
    # don't store all the contexts with this job - they're useless past the
    # initialization step.
    if configDict:
        mainConfig = configDict['']
        job.setMainConfig(configDict[''])
    else:
        cfg = copy.deepcopy(buildConfig)
        cfg.dropContexts()
        mainConfig = cfg
    mainConfig.recurseGroups = int(recurseGroups)
    job.setMainConfig(mainConfig)
    baseMatchRules = mainConfig.matchTroveRule
    for contextStr, troveSpecList in trovesByContext.iteritems():
        contextBaseMatchRules = baseMatchRules
        if configDict and contextStr in configDict:
            cfg = configDict[contextStr]
        elif contextStr:
            # making this a copy is critical
            cfg = copy.deepcopy(buildConfig)
            for context in contextStr.split(','):
                cfg.setContext(context)
            cfg.dropContexts()
        else:
            # don't bother with baseMatchRules in the base config.
            contextBaseMatchRules = []
            cfg = copy.deepcopy(buildConfig)
            cfg.dropContexts()
            contextStr = ''
            # NOTE(review): the empty-context config replaces the job's
            # main config here -- confirm this is intentional.
            job.setMainConfig(cfg)
        cfg.initializeFlavors()
        use.setBuildFlagsFromFlavor(None, cfg.buildFlavor, error=False)
        if not cfg.buildLabel and cfg.installLabelPath:
            cfg.buildLabel = cfg.installLabelPath[0]
        # dedupe specs within this context before resolving them
        troveSpecList = list(set(troveSpecList))
        troveList = getTrovesToBuild(cfg, conaryclient, troveSpecList,
                                     message=None,
                                     recurseGroups=recurseGroups,
                                     matchSpecs=contextBaseMatchRules + cfg.matchTroveRule,
                                     reposName=mainConfig.reposName,
                                     updateSpecs=updateSpecs)
        if updateSpecs and oldTroveDict and contextStr in oldTroveDict:
            # honor --update restrictions against the previous job's troves
            troveList = _matchUpdateRestrictions(mainConfig.reposName,
                                                 oldTroveDict[contextStr],
                                                 troveList,
                                                 updateSpecs)
        if rebuild:
            # record already-built binaries so unchanged troves can be reused
            prebuiltBinaries = _findLatestBinariesForTroves(conaryclient,
                                                            mainConfig.reposName,
                                                            troveList)
            if not job.getMainConfig().prebuiltBinaries:
                job.getMainConfig().prebuiltBinaries = prebuiltBinaries
            else:
                job.getMainConfig().prebuiltBinaries.extend(prebuiltBinaries)
        if mainConfig.prepOnly:
            buildType = buildtrove.TROVE_BUILD_TYPE_PREP
        else:
            buildType = buildtrove.TROVE_BUILD_TYPE_NORMAL
        for name, version, flavor in troveList:
            if flavor is None:
                flavor = deps.parseFlavor('')
            bt = buildtrove.BuildTrove(None, name, version, flavor,
                                       context=contextStr,
                                       buildType=buildType)
            job.addTrove(name, version, flavor, contextStr, bt)
            job.setTroveConfig(bt, cfg)
    return job
def getTrovesToBuild(cfg, conaryclient, troveSpecList, message=None,
                     recurseGroups=BUILD_RECURSE_GROUPS_NONE, matchSpecs=None,
                     reposName=None, updateSpecs=None):
    """Resolve trove specs into concrete (name, version, flavor) tuples.

    Local .recipe paths are committed to the internal repository first
    (via _getLocalCook); group specs may be recursed into their members
    depending on recurseGroups.  Also mutates cfg (resolveTroveTups,
    recurseGroups, buildTroveSpecs) as a side effect.
    NOTE(review): Python 2 code (dict.iteritems).
    """
    toBuild = []
    toFind = {}
    groupsToFind = []
    if not matchSpecs:
        matchSpecs = []
    if reposName is None:
        reposName = cfg.reposName
    repos = conaryclient.getRepos()
    cfg.resolveTroveTups = _getResolveTroveTups(cfg, repos)
    cfg.recurseGroups = int(recurseGroups)
    cfg.buildTroveSpecs = []
    newTroveSpecs = []
    recipesToCook = []
    for troveSpec in list(troveSpecList):
        if not isinstance(troveSpec, tuple):
            troveSpec = cmdutil.parseTroveSpec(troveSpec)
        if len(troveSpec) == 3:
            context = ''
        else:
            context = troveSpec[3]
            troveSpec = troveSpec[:3]
        if (troveSpec[0].startswith('group-') and not recurseGroups
            and not compat.ConaryVersion().supportsCloneNonRecursive()):
            log.warning('You will not be able to commit this group build'
                        ' without upgrading conary.')
        if troveSpec[2] is None:
            troveSpec = (troveSpec[0], troveSpec[1], deps.parseFlavor(''))
        # A versionless, readable, existing *.recipe path is a local cook.
        if (not troveSpec[1] and not os.path.isdir(troveSpec[0])
            and os.access(troveSpec[0], os.R_OK)
            and troveSpec[0].endswith('.recipe')):
            # don't rely on cwd, but do allow for symlinks to change
            # when restarting. Is that sane? Or should I just do realpath?
            troveSpec = (os.path.abspath(troveSpec[0]),) + troveSpec[1:]
            cfg.buildTroveSpecs.append(troveSpec)
            recipesToCook.append((os.path.realpath(troveSpec[0]), troveSpec[2]))
            continue
        cfg.buildTroveSpecs.append(troveSpec)
        if troveSpec[0].startswith('group-') and recurseGroups:
            groupsToFind.append(troveSpec)
            if recurseGroups == BUILD_RECURSE_GROUPS_SOURCE:
                newTroveSpecs.append(troveSpec)
        else:
            newTroveSpecs.append(troveSpec)
    # commit any local recipes, keeping their requested flavor alongside
    localTroves = [(_getLocalCook(conaryclient, cfg, x[0], message), x[1])
                   for x in recipesToCook ]
    localTroves = [(x[0][0], x[0][1], x[1]) for x in localTroves]
    if recurseGroups == BUILD_RECURSE_GROUPS_SOURCE:
        compat.ConaryVersion().requireFindGroupSources()
        localGroupTroves = [ x for x in localTroves
                             if x[0].startswith('group-') ]
        toBuild.extend(_findSourcesForSourceGroup(repos, reposName, cfg,
                                                  groupsToFind,
                                                  localGroupTroves,
                                                  updateSpecs))
    elif recurseGroups == BUILD_RECURSE_GROUPS_BINARY:
        newTroveSpecs.extend(_findSpecsForBinaryGroup(repos, reposName, cfg,
                                                      groupsToFind,
                                                      updateSpecs))
    # collapse to :source components, collecting the flavors per source
    for troveSpec in newTroveSpecs:
        sourceName = troveSpec[0].split(':')[0] + ':source'
        s = toFind.setdefault((sourceName, troveSpec[1], None), [])
        if troveSpec[2] not in s:
            s.append(troveSpec[2])
    results = repos.findTroves(cfg.buildLabel, toFind, None)
    # cross every found source with every requested flavor
    for troveSpec, troveTups in results.iteritems():
        flavorList = toFind[troveSpec]
        for troveTup in troveTups:
            for flavor in flavorList:
                toBuild.append((troveTup[0], troveTup[1], flavor))
    toBuild.extend(localTroves)
    if matchSpecs:
        toBuild = _filterListByMatchSpecs(reposName, matchSpecs, toBuild)
    return toBuild
def _filterListByMatchSpecs(reposName, matchSpecs, troveList):
    """Filter troveList by +/- glob match specs.

    A spec whose name begins with '-' removes matching troves; any other
    spec adds them.  If no positive spec is given, everything not removed
    is kept.  NOTE(review): Python 2 code (dict.iteritems).
    """
    matchSpecs = [ cmdline.parseTroveSpec(x, allowEmptyName=True)
                   for x in matchSpecs ]
    hasAddSpec = False
    # normalize missing flavors to the empty flavor
    newTroveList = []
    for troveTup in troveList:
        if troveTup[2] is None:
            flavor = deps.parseFlavor('')
        else:
            flavor = troveTup[2]
        newTroveList.append((troveTup[0], troveTup[1], flavor))
    troveList = newTroveList
    # group troves by their package name (strip the :component)
    troveMap = {}
    for troveTup in troveList:
        key = (troveTup[0].split(':')[0], troveTup[1], troveTup[2])
        troveMap.setdefault(key, []).append(troveTup)
    finalMatchSpecs = {}
    for matchSpec in matchSpecs:
        name = matchSpec[0]
        if name and name[0] == '-':
            removeSpec = True
            name = name[1:]
        else:
            hasAddSpec = True
            removeSpec = False
        if not name:
            filterFn = lambda x: True
        else:
            filterFn = lambda x: fnmatch.fnmatchcase(x[0], name)
        # add all packages that match glob (could be empty in which case
        # all packages are added); value is True when this is a '-' spec.
        finalMatchSpecs.update(dict.fromkeys([(x[0], matchSpec[1],
                                              matchSpec[2]) for x in troveMap
                                              if filterFn(x)],
                                             removeSpec))
    troveSource = trovesource.SimpleTroveSource(troveMap)
    troveSource = recipeutil.RemoveHostSource(troveSource,
                                              reposName)
    results = troveSource.findTroves(None, finalMatchSpecs, None,
                                     allowMissing=True)
    toRemove = []
    toAdd = set()
    for matchSpec, resultList in results.iteritems():
        if not finalMatchSpecs[matchSpec]: # False -> positive spec: keep
            toAdd.update(resultList)
        else:                              # True -> '-' spec: drop
            toRemove.extend(resultList)
    if not hasAddSpec:
        # only removals given: start from everything
        toAdd = set(troveMap)
    toAdd.difference_update(toRemove)
    # expand the surviving package keys back into their trove tuples
    return list(itertools.chain(*(troveMap[x] for x in toAdd)))
def _matchUpdateRestrictions(reposName, oldTroveList,
                             newTroveList, updateSpecs,
                             binaries=False):
    """Merge old and new trove lists according to +/- --update specs.

    A positive spec takes the new version of matching troves (keeping the
    old version of everything else); a '-' spec keeps the old version of
    matching troves.  Returns the merged trove list.
    """
    # index both lists by package (or :source) name for glob expansion
    troveMap = {}
    for troveTup in itertools.chain(oldTroveList, newTroveList):
        if binaries:
            key = (troveTup[0].split(':')[0], troveTup[1], troveTup[2])
        else:
            key = (troveTup[0].split(':')[0] + ':source',
                   troveTup[1], troveTup[2])
        troveMap.setdefault(key, []).append(troveTup)
    updateDict = {}
    newUpdateSpecs = []
    if not updateSpecs:
        return newTroveList
    firstMatch = True
    for troveSpec in updateSpecs:
        if not isinstance(troveSpec, tuple):
            troveSpec = cmdutil.parseTroveSpec(troveSpec)
        # normalize the spec name to match the troveMap keys
        if binaries:
            troveSpec = (troveSpec[0].split(':')[0], troveSpec[1], troveSpec[2])
        else:
            troveSpec = (troveSpec[0].split(':')[0] + ':source',
                         troveSpec[1], troveSpec[2])
        # leading '-' means "do NOT update matching troves"
        if troveSpec[0] and troveSpec[0][0] == '-':
            sense = False
            troveSpec = (troveSpec[0][1:], troveSpec[1], troveSpec[2])
        else:
            sense = True
        name = troveSpec[0]
        if not name:
            filterFn = lambda x: True
        else:
            filterFn = lambda x: fnmatch.fnmatchcase(x[0], name)
        # add all packages that match glob (could be empty in which case
        # all packages are added.
        specs = set([(x[0], troveSpec[1], troveSpec[2]) for x in troveMap
                     if filterFn(x)])
        if not specs:
            newUpdateSpecs.append(troveSpec)
            updateDict[troveSpec] = sense
        updateDict.update(dict.fromkeys(specs, sense))
        # later specs override earlier ones for the same expansion
        for spec in specs:
            if spec in newUpdateSpecs:
                newUpdateSpecs.remove(spec)
        newUpdateSpecs.extend(specs)
    allNewNames = set([ x[0] for x in newTroveList ])
    allOldNames = set([ x[0] for x in oldTroveList ])
    # old troves with no new counterpart can never be "kept back"
    oldTroveList = [ x for x in oldTroveList if x[0] in allNewNames ]
    oldTroves = trovesource.SimpleTroveSource(oldTroveList)
    oldTroves = recipeutil.RemoveHostSource(oldTroves, reposName)
    newTroves = trovesource.SimpleTroveSource(newTroveList)
    newTroves = recipeutil.RemoveHostSource(newTroves, reposName)
    toUse = set()
    # NOTE(review): firstMatch was already set to True above -- this
    # reassignment looks redundant; confirm no code path between them.
    firstMatch = True
    for updateSpec in newUpdateSpecs:
        positiveMatch = updateDict[updateSpec]
        oldResults = oldTroves.findTroves(None, [updateSpec], None,
                                          allowMissing=True).get(updateSpec, [])
        newResults = newTroves.findTroves(None, [updateSpec], None,
                                          allowMissing=True).get(updateSpec, [])
        oldNames = set(x[0] for x in oldResults)
        newNames = set(x[0] for x in newResults)
        if positiveMatch:
            if firstMatch:
                # if the user starts with --update info-foo then they want to
                # by default not update anything not mentioned
                toUse = set(oldTroveList)
                toUse.update(x for x in newTroveList
                             if x[0] not in allOldNames)
                firstMatch = False
            # don't discard any packages for which we don't have
            toKeep = [ x for x in toUse if x[0] not in newNames ]
            toUse.difference_update(oldResults)
            toUse.update(newResults)
            toUse.update(toKeep)
        else:
            if firstMatch:
                # if the user starts with --update -info-foo then they want to
                # update everything _except_ info-foo
                toUse = set(newTroveList)
                firstMatch = False
            toKeep = [ x for x in toUse if x[0] not in oldNames ]
            toUse.difference_update(newResults)
            toUse.update(oldResults)
            toUse.update(toKeep)
    return list(toUse)
def _getResolveTroveTups(cfg, repos):
# get resolve troves - use installLabelPath and install flavor
# for these since they're used for dep resolution
try:
allResolveTroves = itertools.chain(*cfg.resolveTroves)
results = repos.findTroves(cfg.installLabelPath,
list(allResolveTroves), cfg.flavor)
except Exception, err:
context = cfg.context
if not context:
context = 'default'
raise errors.RmakeError("Could not find resolve troves for [%s] context: %s\n" % (context, err))
resolveTroves = []
for resolveTroveSpecList in cfg.resolveTroves:
lst = []
for troveSpec in resolveTroveSpecList:
lst.extend(results[troveSpec])
resolveTroves.append(lst)
return resolveTroves
def _getLocalCook(conaryclient, cfg, recipePath, message):
    """Commit a local recipe into the internal repository for building.

    Chooses between shadowing an existing checked-out source
    (_shadowAndCommit) and committing a brand-new recipe (_commitRecipe).
    Returns the committed source's (name, version, flavor).
    Temporarily disables signing/interactive config and may chdir; both
    are restored in the finally block.
    """
    if not hasattr(cook, 'getRecipeInfoFromPath'):
        raise errors.RmakeError('Local cooks require at least conary 1.0.19')
    recipeDir = os.path.dirname(recipePath)
    # We do not want to sign commits to the local repository, doing so
    # would require that we manage keys in this repository as well.
    oldKey = cfg.signatureKey
    oldMap = cfg.signatureKeyMap
    oldInteractive = cfg.interactive
    oldWorkDir = os.getcwd()
    try:
        cfg.signatureKey = None
        cfg.signatureKeyMap = {}
        cfg.interactive = False
        # a CONARY state file means this directory is a conary checkout
        if os.access(recipeDir + '/CONARY', os.R_OK):
            os.chdir(recipeDir)
            stateFile = state.ConaryStateFromFile(recipeDir + '/CONARY')
            if stateFile.hasSourceState():
                stateFile = stateFile.getSourceState()
                if stateFile.getVersion() != versions.NewVersion():
                    # already committed upstream: shadow it
                    return _shadowAndCommit(conaryclient, cfg, recipeDir,
                                            stateFile, message)
                else:
                    # new package checkout: commit on its declared branch
                    return _commitRecipe(conaryclient, cfg, recipePath, message,
                                         branch=stateFile.getBranch())
        return _commitRecipe(conaryclient, cfg, recipePath, message)
    finally:
        cfg.signatureKey = oldKey
        cfg.signatureKeyMap = oldMap
        cfg.interactive = oldInteractive
        os.chdir(oldWorkDir)
def _getPathList(repos, cfg, recipePath, relative=False):
    """Load the recipe at recipePath and return (recipeClass, pathList).

    pathList is the recipe file plus any local sources the recipe
    references; with relative=True, absolute paths are made relative to
    the recipe's directory.  Raises RmakeError when the recipe cannot be
    initialized.
    """
    loader, recipeClass, sourceVersion = cook.getRecipeInfoFromPath(repos, cfg,
                                                                    recipePath)
    log.info("Getting relevant path information from %s..." % recipeClass.name)
    recipeDir = os.path.dirname(recipePath)
    srcdirs = [ recipeDir ]
    recipeObj = None
    buildLabel = sourceVersion.trailingLabel()
    macros = {'buildlabel' : buildLabel.asString(),
              'buildbranch' : sourceVersion.branch().asString()}
    if recipe.isPackageRecipe(recipeClass):
        recipeObj = recipeClass(cfg, None,
                                srcdirs, macros, lightInstance=True)
    elif recipe.isGroupRecipe(recipeClass):
        recipeObj = recipeClass(repos, cfg, buildLabel, None, None,
                                srcdirs=srcdirs,
                                extraMacros=macros)
    else:
        # no included files for the rest of the recipe types
        pathList = [recipePath]
        recipeObj = None
    if recipeObj:
        try:
            if hasattr(recipeObj, 'loadPolicy'):
                recipeObj.loadPolicy()
            cook._callSetup(cfg, recipeObj)
        # FIX: "except (...), msg" is Python-2-only syntax; the "as" form
        # (PEP 3110) works on python 2.6+ and python 3.
        except (conaryerrors.ConaryError, conaryerrors.CvcError) as msg:
            raise errors.RmakeError("could not initialize recipe: %s" % (msg))
        pathList = recipeObj.fetchLocalSources() + [recipePath]
    if relative:
        finalPathList = []
        for path in pathList:
            if path[0] == '/':
                # strip the recipe dir prefix (plus its trailing '/')
                path = path[(len(recipeDir) +1):]
            finalPathList.append(path)
    else:
        finalPathList = pathList
    return recipeClass, finalPathList
def _hasNullNearBeginning(fileName):
return '\0' in file(fileName).read(1024*1024)
def _getConfigInfo(fileName):
    """Decide whether fileName should be committed as a config file.

    RMK-973: it is now quite important not to mark a file as config
    unless we can be quite confident that it can be handled as such.
    """
    fileMagic = magic.magic(fileName)
    if fileMagic:
        # Only recognized scripts and libtool wrappers may be config;
        # every other magic-recognized file type is treated as binary.
        if not isinstance(fileMagic, (magic.script, magic.ltwrapper)):
            return False
        return not _hasNullNearBeginning(fileName)
    # No magic match: fall back to checkin's filename heuristics.
    if checkin.nonCfgRe.match(fileName):
        return False
    if checkin.cfgRe.match(fileName):
        return not _hasNullNearBeginning(fileName)
    return False
def _shadowAndCommit(conaryclient, cfg, recipeDir, stateFile, message):
    """Shadow an existing source onto the internal repository and commit
    the local working-directory changes on top of that shadow.

    Returns the committed shadow source's (name, version, flavor).
    Works in a temporary checkout directory which is always removed; the
    caller's cwd is restored in the finally block.
    """
    repos = conaryclient.getRepos()
    conaryCompat = compat.ConaryVersion()
    oldSourceVersion = stateFile.getVersion()
    targetLabel = cfg.getTargetLabel(oldSourceVersion)
    if not targetLabel:
        raise errors.RmakeError(
            'Cannot cook local recipes unless a target label is set')
    skipped, cs = conaryclient.createShadowChangeSet(str(targetLabel),
                                                     [stateFile.getNameVersionFlavor()])
    recipePath = recipeDir + '/' + stateFile.getName().split(':')[0] + '.recipe'
    recipeClass, pathList = _getPathList(repos, cfg, recipePath, relative=True)
    troveName = stateFile.getName()
    troveVersion = stateFile.getVersion()
    if not skipped:
        # the shadow did not exist yet: sign and commit it first
        signAbsoluteChangeset(cs, None)
        repos.commitChangeSet(cs)
    log.info("Shadowing %s to internal repository..." % troveName)
    shadowBranch = troveVersion.createShadow(targetLabel).branch()
    shadowVersion = repos.findTrove(None,
                                    (troveName, str(shadowBranch),
                                     None), None)[0][1]
    cwd = os.getcwd()
    prefix = 'rmake-shadow-%s-' % troveName.split(':')[0]
    shadowSourceDir = tempfile.mkdtemp(prefix=prefix)
    try:
        log.info("Committing local changes to %s to the"
                 " internal repository..." % troveName)
        log.resetErrorOccurred()
        checkin.checkout(repos, cfg, shadowSourceDir,
                         ['%s=%s' % (troveName, shadowVersion)])
        if compat.ConaryVersion().stateFileVersion() > 0:
            kw = dict(repos=repos)
        else:
            kw = {}
        # grab new and old state and make any modifications due to adding
        # or deleting of files (we assume files that don't exist are
        # autosource and can be ignored)
        oldState = conaryCompat.ConaryStateFromFile(recipeDir + '/CONARY',
                                                    repos=repos).getSourceState()
        newConaryState = conaryCompat.ConaryStateFromFile(
                                                shadowSourceDir + '/CONARY',
                                                repos=repos)
        newState = newConaryState.getSourceState()
        neededFiles = set(x[1] for x in oldState.iterFileList()
                          if os.path.exists(os.path.join(recipeDir, x[1])))
        neededFiles.update(pathList)
        autoSourceFiles = set(x[1] for x in oldState.iterFileList()
                              if oldState.fileIsAutoSource(x[0]))
        existingFiles = set(x[1] for x in newState.iterFileList()
                            if os.path.exists(os.path.join(shadowSourceDir, x[1])))
        toCopy = neededFiles & existingFiles
        toDel = existingFiles - neededFiles
        toAdd = neededFiles - existingFiles
        # copy local files (and create directories) into the shadow checkout
        for sourceFile in (toCopy | toAdd):
            newPath = os.path.join(shadowSourceDir, sourceFile)
            if os.path.dirname(sourceFile):
                util.mkdirChain(os.path.dirname(newPath))
            if os.path.isdir(sourceFile):
                util.mkdirChain(newPath)
            else:
                shutil.copyfile(os.path.join(recipeDir, sourceFile), newPath)
        os.chdir(shadowSourceDir)
        if hasattr(cfg.sourceSearchDir, '_getUnexpanded'):
            cfg.configKey('sourceSearchDir',
                          cfg.sourceSearchDir._getUnexpanded())
        for f in toDel:
            checkin.removeFile(f)
        if toDel:
            # toDel modifies the CONARY file on disk, so reload with the
            # changes made there.
            newState = conaryCompat.ConaryStateFromFile(
                                        shadowSourceDir + '/CONARY',
                                        repos=repos).getSourceState()
        if conaryCompat.stateFileVersion() == 0:
            checkin.addFiles(toAdd)
        else:
            oldPathIds = dict((x[1], x[0]) for x in oldState.iterFileList())
            for path in toAdd:
                if path in oldPathIds:
                    # reuse the old config determination when we have one
                    isConfig = oldState.fileIsConfig(oldPathIds[path])
                else:
                    isConfig = _getConfigInfo(path)
                checkin.addFiles([path], binary=not isConfig, text=isConfig)
            if toAdd:
                # get the new pathIDs for all the added troves,
                # since we can't set the refresh setting without the
                # needed pathIds
                newState = conaryCompat.ConaryStateFromFile(
                                            shadowSourceDir + '/CONARY',
                                            repos=repos).getSourceState()
            newPathIds = dict((x[1], x[0]) for x in newState.iterFileList())
            for path in (toCopy | toAdd):
                if path in oldPathIds:
                    isConfig = oldState.fileIsConfig(oldPathIds[path])
                else:
                    isConfig = _getConfigInfo(path)
                newState.fileIsConfig(newPathIds[path], isConfig)
            # propagate autosource refresh flags onto the new state
            for path in autoSourceFiles:
                if path in newPathIds:
                    needsRefresh = oldState.fileNeedsRefresh(oldPathIds[path])
                    newState.fileNeedsRefresh(newPathIds[path], needsRefresh)
        # if the factory changed, update it
        if newState.getFactory() != oldState.getFactory():
            newState.setFactory(oldState.getFactory())
        # we may have modified the state file. Write it back out to
        # disk so it will be picked up by the commit.
        newConaryState.setSourceState(newState)
        newConaryState.write(shadowSourceDir + '/CONARY')
        if message is None and compat.ConaryVersion().supportsCloneCallback():
            message = 'Automated rMake commit'
        _doCommit('%s/%s' % (recipeDir, troveName), repos, cfg, message)
        newState = state.ConaryStateFromFile(shadowSourceDir + '/CONARY', **kw)
        return newState.getSourceState().getNameVersionFlavor()
    finally:
        os.chdir(cwd)
        if hasattr(cfg.sourceSearchDir, '_getUnexpanded'):
            cfg.configKey('sourceSearchDir',
                          cfg.sourceSearchDir._getUnexpanded())
        shutil.rmtree(shadowSourceDir)
def _doCommit(recipePath, repos, cfg, message):
    """Commit the checked-out source in the current directory.

    Wraps conary's checkin.commit; raises RmakeError when the commit
    raises or when an error was logged during it.
    """
    try:
        kw = {}
        if compat.ConaryVersion().supportsForceCommit():
            kw.update(force=True)
        rv = checkin.commit(repos, cfg, message, **kw)
    # FIX: "except (...), msg" is Python-2-only syntax; the "as" form
    # (PEP 3110) works on python 2.6+ and python 3.
    except (conaryerrors.CvcError, conaryerrors.ConaryError) as msg:
        raise errors.RmakeError("Could not commit changes to build"
                                " recipe %s: %s" % (recipePath, msg))
    if log.errorOccurred():
        raise errors.RmakeError("Could not commit changes to build"
                                " local file %s" % recipePath)
    return rv
def _commitRecipe(conaryclient, cfg, recipePath, message, branch=None):
    """Create (or update) a copy of a local recipe in the internal
    repository and commit it.

    Returns the committed source's (name, version, flavor).  Works in a
    temporary checkout directory which is always removed; the caller's
    cwd is restored in the finally block.
    NOTE(review): Python 2 code (dict.iteritems).
    """
    repos = conaryclient.getRepos()
    conaryCompat = compat.ConaryVersion()
    recipeClass, pathList = _getPathList(repos, cfg, recipePath)
    sourceName = recipeClass.name + ':source'
    log.info("Creating a copy of %s in the rMake internal repository..." % recipeClass.name)
    cwd = os.getcwd()
    recipeDir = tempfile.mkdtemp()
    log.resetErrorOccurred()
    try:
        fileNames = []
        # Create a source trove that matches the recipe we're trying to cook
        if not branch:
            branch = versions.Branch([cfg.buildLabel])
        targetLabel = cfg.getTargetLabel(branch)
        if compat.ConaryVersion().supportsNewPkgBranch():
            buildBranch = branch.createShadow(targetLabel)
            kw = dict(buildBranch=buildBranch)
        else:
            buildBranch = versions.Branch([targetLabel])
            kw={}
            cfg.buildLabel = targetLabel
        if not repos.getTroveLeavesByBranch(
            { sourceName : { buildBranch : None } }).get(sourceName, None):
            # we pass it None for repos to avoid the label-based check for
            # existing packages.
            checkin.newTrove(None, cfg, recipeClass.name, dir=recipeDir, **kw)
        else:
            # see if this package exists on our build branch
            checkin.checkout(repos, cfg, recipeDir,
                             ['%s=%s' % (sourceName, buildBranch)])
        os.chdir(recipeDir)
        sourceState = state.ConaryStateFromFile(recipeDir + '/CONARY').getSourceState()
        fileNames = dict((os.path.basename(x), x) for x in pathList)
        for (pathId, baseName, fileId, version) in list(sourceState.iterFileList()):
            # update or remove any currently existing files
            if baseName not in fileNames:
                sourceState.removeFilePath(baseName)
            else:
                shutil.copyfile(fileNames[baseName],
                                os.path.join(recipeDir, baseName))
                del fileNames[baseName]
        # whatever is left in fileNames is new: copy it in and add it
        for baseName, path in fileNames.iteritems():
            shutil.copyfile(path, os.path.join(recipeDir, baseName))
        if conaryCompat.stateFileVersion() > 0:
            # mark all the files as binary - since this version can
            # never be checked in, it doesn't really matter, but
            # conary likes us to give a value.
            for fileName in fileNames:
                isConfig = _getConfigInfo(fileName)
                checkin.addFiles([fileName], binary=not isConfig, text=isConfig)
        else:
            checkin.addFiles(fileNames)
        _doCommit(recipePath, repos, cfg, 'Temporary recipe build for rmake')
        newState = conaryCompat.ConaryStateFromFile(recipeDir + '/CONARY',
                                                    repos=repos)
        return newState.getSourceState().getNameVersionFlavor()
    finally:
        os.chdir(cwd)
        shutil.rmtree(recipeDir)
def _findSpecsForBinaryGroup(repos, reposName, cfg, groupsToFind, updateSpecs):
    # Resolve binary group trove specs into the source trove specs that
    # rmake must build: one spec per requested group flavor, plus one per
    # non-group component referenced (strongly or weakly) by each group.
    # NOTE: Python 2 module (iteritems/izip).
    newTroveSpecs = []
    results = repos.findTroves(cfg.buildLabel,
                               groupsToFind, cfg.buildFlavor)
    groupTuples = []
    for troveSpec, troveList in results.iteritems():
        for troveTup in troveList:
            groupTuples.append((troveTup[0], troveTup[1], troveTup[2]))
    # NOTE(review): this passes `troveList`, the loop variable leaked from
    # the iteration above (only the last result list), not the accumulated
    # `groupTuples` — looks like a bug; confirm against
    # _findSourcesForSourceGroup, which passes its accumulated list.
    groupTuples = _matchUpdateRestrictions(reposName,
                                           cfg.recursedGroupTroves,
                                           troveList,
                                           updateSpecs, binaries=True)
    groups = repos.getTroves(groupTuples)
    groups = dict(itertools.izip(groupTuples, groups))
    # Remember which groups we've already recursed into (mutates cfg).
    cfg.recursedGroupTroves.extend(groupTuples)
    # line up troveSpec flavors to trovetuples
    troveSpecsByName = {}
    for troveSpec in groupsToFind:
        troveSpecsByName.setdefault(troveSpec[0], []).append(troveSpec[2])
    for groupTup in groupTuples:
        group = groups[groupTup]
        # One source spec per flavor the caller asked for on this group.
        for flavor in troveSpecsByName[groupTup[0]]:
            groupSource = (group.getSourceName(),
                           group.getVersion().getSourceVersion(False),
                           flavor)
            newTroveSpecs.append(groupSource)
        # Collect every component in the group, collapse to package names,
        # and drop nested groups before resolving their sources.
        troveTups = list(group.iterTroveList(strongRefs=True,
                                             weakRefs=True))
        troveTups = ((x[0].split(':')[0], x[1], x[2])
                     for x in troveTups)
        troveTups = (x for x in troveTups
                     if not x[0].startswith('group-'))
        troveTups = list(set(troveTups))
        troveList = repos.getTroves(troveTups, withFiles=False)
        for trove in troveList:
            n = trove.getSourceName()
            newTroveSpecs.append((n,
                                  trove.getVersion().getSourceVersion().branch(),
                                  trove.getFlavor()))
    return newTroveSpecs
def _findSourcesForSourceGroup(repos, reposName, cfg, groupsToFind,
                               localGroups, updateSpecs):
    # Load each requested group's *source* recipe and ask it which source
    # troves it would build, returning the combined (non-group) list.
    # NOTE: Python 2 module (iteritems/izip).
    findSpecs = {}
    for troveSpec in groupsToFind:
        # Query by the group's :source component; collect all requested
        # flavors per (name, versionSpec) pair.
        sourceName = troveSpec[0].split(':')[0] + ':source'
        l = findSpecs.setdefault((sourceName, troveSpec[1], None), [])
        l.append(troveSpec[2])
    results = repos.findTroves(cfg.buildLabel, findSpecs, cfg.flavor)
    allTups = []
    groupTuples = []
    for troveSpec, troveTupList in results.iteritems():
        flavors = findSpecs[troveSpec]
        # Cross every found source version with every requested flavor.
        for flavor in flavors:
            for troveTup in troveTupList:
                name, version = troveTup[0:2]
                groupTuples.append((name, version, flavor))
    groupTuples += localGroups
    groupTuples = _matchUpdateRestrictions(reposName,
                                           cfg.recursedGroupTroves,
                                           groupTuples,
                                           updateSpecs)
    # Replace (not extend) the recursed-group record (mutates cfg).
    cfg.recursedGroupTroves = groupTuples
    troves = repos.getTroves([(x[0], x[1], deps.Flavor()) for x in groupTuples])
    for (name, version, flavor), trv in itertools.izip(groupTuples, troves):
        localRepos = recipeutil.RemoveHostRepos(repos, reposName)
        # Shadows created on the rmake internal repository must be resolved
        # against their parent branch's label.
        if version.getHost() == reposName:
            realLabel = version.branch().parentBranch().label()
        else:
            realLabel = version.trailingLabel()
        (loader, recipeObj, relevantFlavor) = \
                recipeutil.loadRecipe(repos, name, version, flavor, trv,
                                      defaultFlavor=cfg.buildFlavor,
                                      installLabelPath=cfg.installLabelPath,
                                      buildLabel=realLabel)
        troveTups = grouprecipe.findSourcesForGroup(localRepos, recipeObj)
        allTups.extend(troveTups)
    # Nested groups are built separately; only return real sources.
    allTups = [ x for x in allTups if not x[0].startswith('group-') ]
    return allTups
def displayBuildInfo(job, verbose=False, quiet=False):
    # Print a human-readable summary of an rmake job: per-context
    # configuration (unless quiet) followed by the troves being built.
    # NOTE: Python 2 module (print statements, iteritems).
    trovesByContext = {}
    configDict = job.getConfigDict()
    # Group the job's troves by build context (empty string = default).
    for (n,v,f, context) in sorted(job.iterTroveList(withContexts=True)):
        trovesByContext.setdefault(context, []).append((n,v,f))
    if not quiet:
        # If no trove uses the default context, still show its config.
        if '' not in trovesByContext:
            print '\n{Default Context}\n'
            config = configDict['']
            if verbose:
                config.setDisplayOptions(hidePasswords=True)
                config.display()
            else:
                config.displayKey('copyInConary')
                config.displayKey('copyInConfig')
    for context, troveList in sorted(trovesByContext.iteritems()):
        if not quiet:
            config = configDict[context]
            if not context:
                print '\n{Default Context}\n'
            else:
                print '\n{%s}\n' % context
            print 'ResolveTroves:'
            for idx, resolveTroveList in enumerate(config.resolveTroveTups):
                print ''
                for n,v,f in sorted(resolveTroveList):
                    print '%s=%s[%s]' % (n, v, f)
            print ''
            print 'Configuration:'
            config.setDisplayOptions(hidePasswords=True)
            if verbose:
                config.display()
            else:
                # Copy-in keys only make sense for the default context.
                if not context:
                    config.displayKey('copyInConfig')
                    config.displayKey('copyInConary')
                config.displayKey('buildFlavor')
                config.displayKey('flavor')
                config.displayKey('installLabelPath')
                config.displayKey('repositoryMap')
                config.displayKey('resolveTrovesOnly')
                config.displayKey('user')
            print ''
            print 'Building:'
        # Trove list is printed even in quiet mode.
        for n,v,f in troveList:
            if f is not None and not f.isEmpty():
                f = '[%s]' % f
            else:
                f = ''
            if context:
                contextStr = '{%s}' % context
            else:
                contextStr = ''
            print '%s=%s/%s%s%s' % (n, v.trailingLabel(),
                                    v.trailingRevision(), f,
                                    contextStr)
def _findLatestBinariesForTroves(conaryclient, reposName, troveList):
    """Return the latest binary troves corresponding to *troveList*.

    The only possible built binaries are those with exactly the same
    branch: troves shadowed onto the rmake repository are looked up on
    their parent branch's label, everything else on its trailing label.
    """
    repos = conaryclient.getRepos()
    troveSpecs = []
    for troveTup in troveList:
        packageName = troveTup[0].split(':')[0]
        version = troveTup[1]
        if (version.trailingLabel().getHost() == reposName
            and version.branch().hasParentBranch()):
            label = version.branch().parentBranch().label()
        else:
            label = version.trailingLabel()
        troveSpecs.append((packageName, str(label), None))
    results = repos.findTroves(None, troveSpecs, None, allowMissing=True)
    return list(itertools.chain(*results.itervalues()))
| |
"""
This collection of functions scrapes Box Office Returns at the
weekly, weekend, and daily levels from a film's page on Box Office Mojo.
Last Edit: March, 2017
"""
import requests
from bs4 import BeautifulSoup
import re
import dateutil.parser
from string import ascii_uppercase
import pandas as pd
# import pickle
import time
import urllib.request
import csv
# Shared HTTP session with a retrying adapter so transient network failures
# during long scrapes are retried (up to 10 times) automatically.
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
sess.mount('http://', adapter)
# First Information about dates
def extract_year(anchorString):
    '''
    Return the first four-digit year (2000-2099) found in anchorString,
    as a string, or None when no such year is present.
    '''
    # re.search finds the first match directly; the old findall(...)[0]
    # under a bare except swallowed every error, not just "no match".
    match = re.search(r'20[0-9][0-9]', anchorString)
    if match:
        return match.group(0)
    return None
def extract_calendarWeek(anchorString):
    '''
    Return the calendar week (or weekend) number encoded in anchorString.

    Tries the weekly pattern ('wk=NN') first, then the weekend pattern
    ('wknd=NN'), mirroring the original fallback order; returns None when
    neither matches.
    '''
    # The original used findall(...)[0] guarded by bare excepts; using
    # re.search keeps the same first-match semantics without swallowing
    # unrelated errors.
    for pattern in (r'wk\=(.[0-9]{1})', r'wknd\=(.[0-9]{1})'):
        match = re.search(pattern, anchorString)
        if match:
            return match.group(1)
    return None
def extract_date(anchorString):
    '''
    Return the date text wrapped in <b>...</b> inside anchorString,
    or None when no bold date is present.
    '''
    match = re.search(r'<b>(.+?)<', anchorString)
    if match is None:
        return None
    # clean out any badly parsed symbols (cp1252 en-dash residue)
    return match.group(1).replace('\x96', '-')
def find_dateInfo(anchorString):
    '''
    Return every piece of date information contained in a Box Office Mojo
    href string, as a (year, calendarWeek, date) tuple; each element is
    None when that piece could not be extracted.
    '''
    return (extract_year(anchorString),
            extract_calendarWeek(anchorString),
            extract_date(anchorString))
# Now Box Office Relevant information
def money_to_int(moneystring):
    '''
    Strip dollar signs ($) and commas from a currency string and return
    the value as an int, e.g. '$1,234' -> 1234.

    Best-effort: non-string input is returned untouched, and strings that
    are not numeric after stripping are returned (stripped) unchanged --
    the same fallthrough the original bare excepts provided.
    '''
    try:
        moneystring = moneystring.replace('$', '').replace(',', '')
    except AttributeError:
        # not a string (e.g. None) -- hand it back untouched
        return moneystring
    try:
        return int(moneystring)
    except ValueError:
        return moneystring
def get_weekly_movieRank(anchor):
    '''
    Return the movie's rank for the period: the text of the first <td>
    following *anchor*. Rank compares a movie's takings to other movies
    currently in cinemas. Returns None when the cell is missing.
    '''
    try:
        return anchor.find_next("td").get_text()
    except AttributeError:
        # anchor is None or the expected <td> is absent
        return None
def get_boxOffice(anchor):
    '''
    Return the box office takings for the week/weekend: the second <td>
    after *anchor*, converted to int via money_to_int. Returns None when
    the cell is missing.
    '''
    try:
        cell = anchor.find_next("td").find_next("td")
        return money_to_int(cell.get_text())
    except AttributeError:
        # anchor is None or a <td> in the chain is absent
        return None
def get_theatres(anchor):
    '''
    Return the number of theatres the movie showed in: the fourth <td>
    after *anchor*, as an int (thousands commas stripped).

    The data are always reported as constant over a week, using the
    weekend number as the number of theatres. Returns None when the cell
    is missing or not numeric.
    '''
    try:
        cell = anchor.find_next("td").find_next("td").find_next("td").find_next("td")
        return int(cell.get_text().replace(',', ''))
    except (AttributeError, ValueError):
        # missing cell chain, or cell text is not a number
        return None
def get_totalBoxOfficeToDate(anchor):
    '''
    Return the total box office gross up to (and including) the current
    week/weekend: the seventh <td> after *anchor*, converted to int via
    money_to_int. Returns None when the cell is missing.
    '''
    try:
        cell = anchor
        # walk seven <td> cells forward from the anchor
        for _ in range(7):
            cell = cell.find_next("td")
        return money_to_int(cell.get_text())
    except AttributeError:
        # anchor is None or a <td> in the chain is absent
        return None
def identify_longWeekend(df):
    '''
    Flag long weekends, which Box Office Mojo marks with a leading <i>
    in the date column: add a boolean 'longWeekend' column, strip the
    marker from 'date', and return the frame (modified in place).
    '''
    marker = '<i>'
    df['longWeekend'] = df['date'].str.contains(marker)
    df['date'] = df['date'].str.replace(marker, '')
    return df
def scrape_BoxOfficeInfo(href_pattern, soup, movie_id):
    '''
    Scrape weekly/weekend Box Office rows from the parsed page *soup*,
    one row per anchor matching *href_pattern*.

    Returns a DataFrame with labelled columns, or None when no matching
    rows were found (preserving the original implicit-None contract).
    '''
    rows = []
    for iAnchor in soup.findAll('a', href=href_pattern):
        ## convert to string for regular expression parsing
        anchorString = str(iAnchor)
        ## date information is embedded inside the href link
        year, calendarWeek, date = find_dateInfo(anchorString)
        ## Box Office information lives in the <td> cells after the anchor
        rows.append([movie_id, year, calendarWeek, date,
                     get_weekly_movieRank(iAnchor),
                     get_boxOffice(iAnchor),
                     get_theatres(iAnchor),
                     get_totalBoxOfficeToDate(iAnchor)])
    if not rows:
        return None
    # Build the frame once from the accumulated rows. The original called
    # DataFrame.append per row, which is O(n^2) and was removed in
    # pandas 2.0.
    return pd.DataFrame(rows, columns=["movie_id", "year", "calendarWeek",
                                       "date", "rank", "boxOffice",
                                       "theatres", "grossBoxOffice"])
def scrape_dailyBoxOfficeInfo(href_pattern, soup, movie_id):
    '''
    Scrape daily Box Office rows from the parsed page *soup*.

    Daily returns are laid out differently from weekly/weekend tables,
    hence the separate scraper. Returns a labelled DataFrame, or None
    when no matching rows were found (preserving the original
    implicit-None contract).
    '''
    rows = []
    for iAnchor in soup.findAll('a', href=href_pattern):
        ## convert to string for regular expression parsing
        anchorString = str(iAnchor)
        # date information embedded in the anchor markup
        try:
            year = re.findall(r'20[0-9][0-9]', anchorString)[0]
        except IndexError:
            year = None
        try:
            date = re.findall(r'<b>(.+?)<', anchorString)[0]
            # clean out badly parsed symbols (cp1252 en-dash residue)
            date = re.sub('\x96', '-', date)
        except IndexError:
            date = None
        # Each value lives a fixed number of <td> cells after the anchor.
        # A failure in one step leaves that *_tag name unbound, so later
        # steps may raise NameError as well as AttributeError; keep the
        # broad Exception handlers to preserve that cascade (the original
        # used bare excepts, which also trapped KeyboardInterrupt).
        try:
            rank_tag = iAnchor.find_next("td")
            rank = rank_tag.get_text()
        except Exception:
            rank = None
        try:
            boxOffice_tag = rank_tag.find_next("td")
            boxOffice = money_to_int(boxOffice_tag.get_text())
        except Exception:
            boxOffice = None
        try:
            theatres_tag = boxOffice_tag.find_next("td").find_next("td").find_next("td").contents[0]
            theatres = int(theatres_tag.get_text().replace(',', ''))
        except Exception:
            theatres = None
        try:
            grossBO_tag = theatres_tag.find_next("td").find_next("td").contents[0]
            grossBoxOffice = money_to_int(grossBO_tag.get_text())
        except Exception:
            grossBoxOffice = None
        try:
            dayOfRelease = grossBO_tag.find_next("td").contents[0].get_text()
        except Exception:
            dayOfRelease = None
        rows.append([movie_id, year, date, rank, boxOffice, theatres,
                     grossBoxOffice, dayOfRelease])
    if not rows:
        return None
    # Build the frame once from the accumulated rows. The original called
    # DataFrame.append per row, which is O(n^2) and was removed in
    # pandas 2.0.
    return pd.DataFrame(rows, columns=["movie_id", "year", "date", "rank",
                                       "boxOffice", "theatres",
                                       "grossBoxOffice", "dayOfRelease"])
def process_weekendBoxOffice(currentURL):
    '''
    Collect weekend Box Office information for the movie at *currentURL*
    on Box Office Mojo.

    Returns (movie_id, DataFrame-or-None) on success, or None when the
    page could not be fetched.
    '''
    href_pattern = re.compile('^/weekend/chart/\?yr')
    # The movie ID is the last '='-separated piece of the URL, sans '.htm'.
    movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
    print('Getting Weekend Box Office for', movie_id)
    boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=weekend&id=' + movie_id + '.htm'
    response = sess.get(boxOffice_url)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.text, "lxml")
    df_movie = scrape_BoxOfficeInfo(href_pattern, soup, movie_id)
    # Tag long weekends when any rows were scraped.
    if df_movie is not None:
        df_movie = identify_longWeekend(df_movie)
    return movie_id, df_movie
def process_weeklyBoxOffice(currentURL):
    '''
    Collect weekly Box Office information for the movie at *currentURL*
    on Box Office Mojo.

    Returns (movie_id, DataFrame-or-None) on success, or None when the
    page could not be fetched.
    '''
    href_pattern = re.compile('^/weekly/chart/')
    # The movie ID is the last '='-separated piece of the URL, sans '.htm'.
    movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
    print('Getting Weekly Box Office for', movie_id)
    boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=weekly&id=' + movie_id + '.htm'
    response = sess.get(boxOffice_url)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.text, "lxml")
    return movie_id, scrape_BoxOfficeInfo(href_pattern, soup, movie_id)
def process_dailyBoxOffice(currentURL):
    '''
    Collect daily Box Office information for the movie at *currentURL*
    on Box Office Mojo.

    Returns (movie_id, DataFrame-or-None) on success, or None when the
    page could not be fetched.
    '''
    href_pattern = re.compile('^/daily/chart/\?sortdate=')
    # The movie ID is the last '='-separated piece of the URL, sans '.htm'.
    movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
    print('Getting Daily Box Office for', movie_id)
    boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=daily&view=chart&id=' + movie_id + '.htm'
    response = sess.get(boxOffice_url)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.text, "lxml")
    return movie_id, scrape_dailyBoxOfficeInfo(href_pattern, soup, movie_id)
| |
import random
import string
import sys
from typing import (
Callable,
Iterable,
List,
Set,
)
from unittest import skip
from darglint.token import (
TokenType,
Token,
)
from darglint.config import (
get_config,
Configuration,
)
REFACTORING_COMPLETE = True
def require_python(major=3, minor=8):
    """Skip a unit test if the python version is too old.

    Args:
        major: The required major python version.
        minor: The required minor python version.

    Returns:
        A decorator returning the function unchanged, or wrapped by
        `skip` when the interpreter is older than (major, minor).
    """
    def _wrapper(fn):
        # Compare as a tuple: the previous independent major/minor checks
        # wrongly skipped e.g. Python 4.0 when (3, 8) was required,
        # because 0 < 8.
        if (sys.version_info.major, sys.version_info.minor) < (major, minor):
            return skip(fn)
        return fn
    return _wrapper
def replace(name=''):
    # type: (str) -> Callable
    """Decorates a function which must be replaced.

    If the module-level REFACTORING_COMPLETE flag is True, the decorated
    test fails unless an attribute named *name* exists on the instance.

    Args:
        name: The name of the method which is replacing it.

    Returns:
        The same function, failing if the refactoring is complete.
    """
    def wrapper(fn):
        # type: (Callable) -> Callable
        def _inner(*args, **kwargs):
            self = args[0]
            replacement_missing = not hasattr(self, name)
            if replacement_missing and REFACTORING_COMPLETE:
                self.fail('No replacement defined!')
            return fn(*args, **kwargs)
        return _inner
    return wrapper
def remove(fn):
    # type: (Callable) -> Callable
    """Describes a method which should be removed after refactoring.

    Args:
        fn: The method which should be removed.

    Returns:
        A method which will fail if refactoring has completed.
    """
    def _inner(*args, **kwargs):
        if REFACTORING_COMPLETE:
            instance = args[0]
            instance.fail('This should have been removed!')
        return fn(*args, **kwargs)
    return _inner
def random_string(min_length=1, max_length=20):
    # type: (int, int) -> str
    """Return a random run of ASCII letters.

    The length is drawn uniformly from [min_length, max_length].
    """
    length = random.randint(min_length, max_length)
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def random_tokens(min_length=1, max_length=20, exclude=set()):
    # type: (int, int, Set[TokenType]) -> Iterable[Token]
    """Generate a random token stream for parser tests.

    Args:
        min_length: Minimum number of tokens to emit.
        max_length: Maximum number of tokens to emit.
        exclude: Token types which must not appear in the stream.

    Returns:
        A list of Tokens with monotonically non-decreasing line numbers.
    """
    # Token types with a single fixed lexeme.
    fixed_values = {
        TokenType.ARGUMENTS: 'Args',
        TokenType.COLON: ':',
        TokenType.DOCTERM: '"""',
        TokenType.HASH: '#',
        TokenType.INDENT: '    ',
        TokenType.LPAREN: '(',
        TokenType.NEWLINE: '\n',
        TokenType.RAISES: 'Raises',
        TokenType.RETURNS: 'Returns',
        TokenType.RPAREN: ')',
        TokenType.YIELDS: 'Yields',
        TokenType.NOQA: 'noqa',
        TokenType.OTHER: 'Other',
        TokenType.RECEIVES: 'Receives',
        TokenType.HEADER: '--------',
        TokenType.WARNS: 'Warns',
        TokenType.SEE: 'See',
        TokenType.ALSO: 'Also',
        TokenType.NOTES: 'Notes',
        TokenType.EXAMPLES: 'Examples',
        TokenType.REFERENCES: 'References',
    }
    # Token types whose lexeme is a fresh random string.
    randomized_types = {
        TokenType.WORD,
        TokenType.RETURN_TYPE,
        TokenType.YIELD_TYPE,
        TokenType.VARIABLE_TYPE,
        TokenType.ARGUMENT_TYPE,
    }
    allowable = [x for x in TokenType if x not in exclude]
    ret = list()  # type: List[Token]
    line_number = 0
    for _ in range(random.randint(min_length, max_length)):
        _type = random.choice(allowable)  # type: TokenType
        if _type in fixed_values:
            value = fixed_values[_type]
        elif _type in randomized_types:
            value = random_string()
        elif _type == TokenType.VARIABLES:
            value = random.choice(['var', 'ivar', 'cvar'])
        else:
            raise Exception('Unexpected token type {}'.format(
                _type
            ))
        ret.append(Token(
            token_type=_type,
            value=value,
            line_number=line_number,
        ))
        # Advance to the next line roughly half the time.
        line_number += random.choice([0, 1])
    return ret
def reindent(program):
    """Reindent the program.

    Strips the smallest common leading-space prefix from every line, so
    programs can be written naturally inside indented string literals.

    Args:
        program: A program which is indented too much.

    Returns:
        The program, reindented.
    """
    def _first_nonspace(line):
        # Column of the first non-space character; -1 for all-space lines.
        for column, char in enumerate(line):
            if char != ' ':
                return column
        return -1

    lines = program.split('\n')
    indents = [n for n in map(_first_nonspace, lines) if n >= 0]
    common = min(indents)
    return '\n'.join(line[common:] for line in lines)
| |
# -*- coding: utf-8 -*-
"""DataFrame client for InfluxDB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from collections import defaultdict
import pandas as pd
import numpy as np
from .client import InfluxDBClient
from .line_protocol import _escape_tag
def _pandas_time_unit(time_precision):
unit = time_precision
if time_precision == 'm':
unit = 'ms'
elif time_precision == 'u':
unit = 'us'
elif time_precision == 'n':
unit = 'ns'
assert unit in ('s', 'ms', 'us', 'ns')
return unit
def _escape_pandas_series(s):
    """Escape every value of Series *s* for use as an InfluxDB tag."""
    return s.apply(_escape_tag)
class DataFrameClient(InfluxDBClient):
    """DataFrameClient instantiates InfluxDBClient to connect to the backend.

    The ``DataFrameClient`` object holds information necessary to connect
    to InfluxDB. Requests can be made to InfluxDB directly through the client.
    The client reads and writes from pandas DataFrames.
    """

    # Reference timestamp used by _datetime_to_epoch.
    EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')

    def write_points(self,
                     dataframe,
                     measurement,
                     tags=None,
                     tag_columns=None,
                     field_columns=None,
                     time_precision=None,
                     database=None,
                     retention_policy=None,
                     batch_size=None,
                     protocol='line',
                     numeric_precision=None):
        """Write to multiple time series names.

        :param dataframe: data points in a DataFrame (index must be a
            DatetimeIndex or PeriodIndex)
        :param measurement: name of measurement
        :param tags: dictionary of tags, with string key-values
        :param tag_columns: DataFrame columns to write as tags
        :param field_columns: DataFrame columns to write as fields;
            defaults to every column not listed in ``tag_columns``
        :param time_precision: [Optional, default None] Either 's', 'ms', 'u'
            or 'n'.
        :param database: database to write into; defaults to the client's
            default database
        :param retention_policy: retention policy to write into
        :param batch_size: [Optional] Value to write the points in batches
            instead of all at one time. Useful for when doing data dumps from
            one database to another or when doing a massive write operation
        :type batch_size: int
        :param protocol: Protocol for writing data. Either 'line' or 'json'.
        :param numeric_precision: Precision for floating point values.
            Either None, 'full' or some int, where int is the desired decimal
            precision. 'full' preserves full precision for int and float
            datatypes. Defaults to None, which preserves 14-15 significant
            figures for float and all significant figures for int datatypes.
        """
        if tag_columns is None:
            tag_columns = []
        if field_columns is None:
            field_columns = []
        if batch_size:
            # Write the frame in slices of batch_size rows.
            number_batches = int(math.ceil(len(dataframe) / float(batch_size)))
            for batch in range(number_batches):
                start_index = batch * batch_size
                end_index = (batch + 1) * batch_size
                if protocol == 'line':
                    points = self._convert_dataframe_to_lines(
                        dataframe.iloc[start_index:end_index].copy(),
                        measurement=measurement,
                        global_tags=tags,
                        time_precision=time_precision,
                        tag_columns=tag_columns,
                        field_columns=field_columns,
                        numeric_precision=numeric_precision)
                else:
                    points = self._convert_dataframe_to_json(
                        dataframe.iloc[start_index:end_index].copy(),
                        measurement=measurement,
                        tags=tags,
                        time_precision=time_precision,
                        tag_columns=tag_columns,
                        field_columns=field_columns)
                super(DataFrameClient, self).write_points(
                    points,
                    time_precision,
                    database,
                    retention_policy,
                    protocol=protocol)
            return True
        # Unbatched path: convert the whole frame and write once.
        if protocol == 'line':
            points = self._convert_dataframe_to_lines(
                dataframe,
                measurement=measurement,
                global_tags=tags,
                tag_columns=tag_columns,
                field_columns=field_columns,
                time_precision=time_precision,
                numeric_precision=numeric_precision)
        else:
            points = self._convert_dataframe_to_json(
                dataframe,
                measurement=measurement,
                tags=tags,
                time_precision=time_precision,
                tag_columns=tag_columns,
                field_columns=field_columns)
        super(DataFrameClient, self).write_points(
            points,
            time_precision,
            database,
            retention_policy,
            protocol=protocol)
        return True

    def query(self,
              query,
              params=None,
              epoch=None,
              expected_response_code=200,
              database=None,
              raise_errors=True,
              chunked=False,
              chunk_size=0,
              dropna=True):
        """
        Query data into a DataFrame.

        :param query: the actual query string
        :param params: additional parameters for the request, defaults to {}
        :param epoch: response timestamps to be in epoch format either 'h',
            'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
            RFC3339 UTC format with nanosecond precision
        :param expected_response_code: the expected status code of response,
            defaults to 200
        :param database: database to query, defaults to None
        :param raise_errors: Whether or not to raise exceptions when InfluxDB
            returns errors, defaults to True
        :param chunked: Enable to use chunked responses from InfluxDB.
            With ``chunked`` enabled, one ResultSet is returned per chunk
            containing all results within that chunk
        :param chunk_size: Size of each chunk to tell InfluxDB to use.
        :param dropna: drop columns where all values are missing
        :returns: the queried data
        :rtype: :class:`~.ResultSet`
        """
        query_args = dict(params=params,
                          epoch=epoch,
                          expected_response_code=expected_response_code,
                          raise_errors=raise_errors,
                          chunked=chunked,
                          database=database,
                          chunk_size=chunk_size)
        results = super(DataFrameClient, self).query(query, **query_args)
        # Only SELECT results are reshaped into DataFrames; other
        # statements (SHOW, DROP, ...) are passed through untouched.
        if query.strip().upper().startswith("SELECT"):
            if len(results) > 0:
                return self._to_dataframe(results, dropna)
            else:
                return {}
        else:
            return results

    def _to_dataframe(self, rs, dropna=True):
        # Convert a ResultSet into a dict of DataFrames keyed by series
        # name (or by (name, sorted tag items) when tags are present).
        result = defaultdict(list)
        if isinstance(rs, list):
            # NOTE(review): dropna is not forwarded for list input, and on
            # Python 3 this returns a lazy map object rather than a list —
            # confirm callers consume it.
            return map(self._to_dataframe, rs)
        for key, data in rs.items():
            name, tags = key
            if tags is None:
                key = name
            else:
                key = (name, tuple(sorted(tags.items())))
            df = pd.DataFrame(data)
            df.time = pd.to_datetime(df.time)
            df.set_index('time', inplace=True)
            # InfluxDB returns UTC timestamps.
            df.index = df.index.tz_localize('UTC')
            df.index.name = None
            result[key].append(df)
        # Merge the chunks per key and sort chronologically.
        for key, data in result.items():
            df = pd.concat(data).sort_index()
            if dropna:
                df.dropna(how='all', axis=1, inplace=True)
            result[key] = df
        return result

    @staticmethod
    def _convert_dataframe_to_json(dataframe,
                                   measurement,
                                   tags=None,
                                   tag_columns=None,
                                   field_columns=None,
                                   time_precision=None):
        # Convert a DataFrame into the list-of-dicts point format used by
        # the JSON write protocol.
        if not isinstance(dataframe, pd.DataFrame):
            raise TypeError('Must be DataFrame, but type was: {0}.'
                            .format(type(dataframe)))
        if not (isinstance(dataframe.index, pd.PeriodIndex) or
                isinstance(dataframe.index, pd.DatetimeIndex)):
            raise TypeError('Must be DataFrame with DatetimeIndex or '
                            'PeriodIndex.')
        # Make sure tags and tag columns are correctly typed
        tag_columns = tag_columns if tag_columns is not None else []
        field_columns = field_columns if field_columns is not None else []
        tags = tags if tags is not None else {}
        # Assume field columns are all columns not included in tag columns
        if not field_columns:
            field_columns = list(
                set(dataframe.columns).difference(set(tag_columns)))
        # NOTE(review): DatetimeIndex.to_datetime() and to_dict('record')
        # (singular) only exist in older pandas; modern pandas removed
        # both ('records' is the current spelling) — this module appears
        # pinned to a pre-1.0 pandas; confirm before upgrading.
        dataframe.index = dataframe.index.to_datetime()
        if dataframe.index.tzinfo is None:
            dataframe.index = dataframe.index.tz_localize('UTC')
        # Convert column to strings
        dataframe.columns = dataframe.columns.astype('str')
        # Convert dtype for json serialization
        dataframe = dataframe.astype('object')
        # Divisor converting nanosecond timestamps to the target precision.
        precision_factor = {
            "n": 1,
            "u": 1e3,
            "ms": 1e6,
            "s": 1e9,
            "m": 1e9 * 60,
            "h": 1e9 * 3600,
        }.get(time_precision, 1)
        points = [
            {'measurement': measurement,
             'tags': dict(list(tag.items()) + list(tags.items())),
             'fields': rec,
             'time': np.int64(ts.value / precision_factor)}
            for ts, tag, rec in zip(dataframe.index,
                                    dataframe[tag_columns].to_dict('record'),
                                    dataframe[field_columns].to_dict('record'))
        ]
        return points

    def _convert_dataframe_to_lines(self,
                                    dataframe,
                                    measurement,
                                    field_columns=None,
                                    tag_columns=None,
                                    global_tags=None,
                                    time_precision=None,
                                    numeric_precision=None):
        # Convert a DataFrame into a list of InfluxDB line-protocol
        # strings: "measurement[,tags] fields timestamp".
        dataframe = dataframe.dropna(how='all').copy()
        if len(dataframe) == 0:
            return []
        if not isinstance(dataframe, pd.DataFrame):
            raise TypeError('Must be DataFrame, but type was: {0}.'
                            .format(type(dataframe)))
        if not (isinstance(dataframe.index, pd.PeriodIndex) or
                isinstance(dataframe.index, pd.DatetimeIndex)):
            raise TypeError('Must be DataFrame with DatetimeIndex or '
                            'PeriodIndex.')
        # Create a Series of columns for easier indexing
        column_series = pd.Series(dataframe.columns)
        if field_columns is None:
            field_columns = []
        if tag_columns is None:
            tag_columns = []
        if global_tags is None:
            global_tags = {}
        # Make sure field_columns and tag_columns are lists
        field_columns = list(field_columns) if list(field_columns) else []
        tag_columns = list(tag_columns) if list(tag_columns) else []
        # If field columns but no tag columns, assume rest of columns are tags
        if field_columns and (not tag_columns):
            tag_columns = list(column_series[~column_series.isin(
                field_columns)])
        # If no field columns, assume non-tag columns are fields
        if not field_columns:
            field_columns = list(column_series[~column_series.isin(
                tag_columns)])
        # Divisor converting nanosecond timestamps to the target precision.
        precision_factor = {
            "n": 1,
            "u": 1e3,
            "ms": 1e6,
            "s": 1e9,
            "m": 1e9 * 60,
            "h": 1e9 * 3600,
        }.get(time_precision, 1)
        # Make array of timestamp ints
        if isinstance(dataframe.index, pd.PeriodIndex):
            time = ((dataframe.index.to_timestamp().values.astype(np.int64) /
                     precision_factor).astype(np.int64).astype(str))
        else:
            time = ((pd.to_datetime(dataframe.index).values.astype(np.int64) /
                     precision_factor).astype(np.int64).astype(str))
        # If tag columns exist, make an array of formatted tag keys and values
        if tag_columns:
            # Make global_tags as tag_columns
            # NOTE(review): this appends to the caller-supplied tag_columns
            # list — a side effect visible to the caller; confirm intended.
            if global_tags:
                for tag in global_tags:
                    dataframe[tag] = global_tags[tag]
                    tag_columns.append(tag)
            tag_df = dataframe[tag_columns]
            tag_df = tag_df.fillna('')  # replace NA with empty string
            tag_df = tag_df.sort_index(axis=1)
            tag_df = self._stringify_dataframe(
                tag_df, numeric_precision, datatype='tag')
            # join preprendded tags, leaving None values out
            tags = tag_df.apply(
                lambda s: [',' + s.name + '=' + v if v else '' for v in s])
            tags = tags.sum(axis=1)
            del tag_df
        elif global_tags:
            # No tag columns: the same global tag string applies to every row.
            tag_string = ''.join(
                [",{}={}".format(k, _escape_tag(v)) if v else ''
                 for k, v in sorted(global_tags.items())]
            )
            tags = pd.Series(tag_string, index=dataframe.index)
        else:
            tags = ''
        # Make an array of formatted field keys and values
        field_df = dataframe[field_columns]
        field_df = self._stringify_dataframe(field_df,
                                             numeric_precision,
                                             datatype='field')

        def format_line(line):
            line = line[~line.isnull()]  # drop None entries
            return ",".join((line.index + '=' + line.values))

        fields = field_df.apply(format_line, axis=1)
        del field_df
        # Generate line protocol string
        measurement = _escape_tag(measurement)
        points = (measurement + tags + ' ' + fields + ' ' + time).tolist()
        return points

    @staticmethod
    def _stringify_dataframe(dframe, numeric_precision, datatype='field'):
        # Render every cell as its line-protocol string form: ints get an
        # 'i' suffix, strings get quoted (fields) or escaped (tags), and
        # floats are rounded/repr'd according to numeric_precision.
        # Prevent modification of input dataframe
        dframe = dframe.copy()
        # Keep the positions where Null values are found
        mask_null = dframe.isnull().values
        # Find int and string columns for field-type data
        int_columns = dframe.select_dtypes(include=['integer']).columns
        string_columns = dframe.select_dtypes(include=['object']).columns
        # Convert dframe to string
        if numeric_precision is None:
            # If no precision specified, convert directly to string (fast)
            dframe = dframe.astype(str)
        elif numeric_precision == 'full':
            # If full precision, use repr to get full float precision
            float_columns = (dframe.select_dtypes(
                include=['floating']).columns)
            nonfloat_columns = dframe.columns[~dframe.columns.isin(
                float_columns)]
            dframe[float_columns] = dframe[float_columns].applymap(repr)
            dframe[nonfloat_columns] = (dframe[nonfloat_columns].astype(str))
        elif isinstance(numeric_precision, int):
            # If precision is specified, round to appropriate precision
            float_columns = (dframe.select_dtypes(
                include=['floating']).columns)
            nonfloat_columns = dframe.columns[~dframe.columns.isin(
                float_columns)]
            dframe[float_columns] = (dframe[float_columns].round(
                numeric_precision))
            # If desired precision is > 10 decimal places, need to use repr
            if numeric_precision > 10:
                dframe[float_columns] = (dframe[float_columns].applymap(repr))
                dframe[nonfloat_columns] = (dframe[nonfloat_columns]
                                            .astype(str))
            else:
                dframe = dframe.astype(str)
        else:
            raise ValueError('Invalid numeric precision.')
        if datatype == 'field':
            # If dealing with fields, format ints and strings correctly
            dframe[int_columns] += 'i'
            dframe[string_columns] = '"' + dframe[string_columns] + '"'
        elif datatype == 'tag':
            dframe = dframe.apply(_escape_pandas_series)
        dframe.columns = dframe.columns.astype(str)
        # Restore None where the input had nulls so they can be dropped later.
        dframe = dframe.where(~mask_null, None)
        return dframe

    def _datetime_to_epoch(self, datetime, time_precision='s'):
        # Convert a pandas/py datetime to an epoch number in the requested
        # precision ('h', 'm', 's', 'ms', 'u', or 'n').
        seconds = (datetime - self.EPOCH).total_seconds()
        if time_precision == 'h':
            return seconds / 3600
        elif time_precision == 'm':
            return seconds / 60
        elif time_precision == 's':
            return seconds
        elif time_precision == 'ms':
            return seconds * 1e3
        elif time_precision == 'u':
            return seconds * 1e6
        elif time_precision == 'n':
            return seconds * 1e9
| |
import pytest
from awx.api.versioning import reverse
from awx.main.models.mixins import WebhookTemplateMixin
from awx.main.models.credential import Credential, CredentialType
@pytest.mark.django_db
@pytest.mark.parametrize(
    "user_role, expect", [
        ('superuser', 200),
        ('org admin', 200),
        ('jt admin', 200),
        ('jt execute', 403),
        ('org member', 403),
    ]
)
def test_get_webhook_key_jt(organization_factory, job_template_factory, get, user_role, expect):
    """GET of a job template's webhook key is allowed only for admins."""
    objs = organization_factory("org", superusers=['admin'], users=['user'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template
    if user_role == 'superuser':
        user = objs.superusers.admin
    else:
        user = objs.users.user
        # Grant only for non-superuser roles: 'superuser' has no second
        # word, so user_role.split()[1] would raise IndexError if this ran
        # unconditionally.
        grant_obj = objs.organization if user_role.startswith('org') else jt
        getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
    url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk})
    response = get(url, user=user, expect=expect)
    if expect < 400:
        assert response.data == {'webhook_key': ''}
@pytest.mark.django_db
@pytest.mark.parametrize(
    "user_role, expect", [
        ('superuser', 200),
        ('org admin', 200),
        ('jt admin', 200),
        ('jt execute', 403),
        ('org member', 403),
    ]
)
def test_get_webhook_key_wfjt(organization_factory, workflow_job_template_factory, get, user_role, expect):
    """GET of a workflow JT's webhook key is allowed only for admins."""
    objs = organization_factory("org", superusers=['admin'], users=['user'])
    wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template
    if user_role == 'superuser':
        user = objs.superusers.admin
    else:
        user = objs.users.user
        # Grant only for non-superuser roles: 'superuser' has no second
        # word, so user_role.split()[1] would raise IndexError if this ran
        # unconditionally.
        grant_obj = objs.organization if user_role.startswith('org') else wfjt
        getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
    url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk})
    response = get(url, user=user, expect=expect)
    if expect < 400:
        assert response.data == {'webhook_key': ''}
@pytest.mark.django_db
@pytest.mark.parametrize(
    "user_role, expect", [
        ('superuser', 201),
        ('org admin', 201),
        ('jt admin', 201),
        ('jt execute', 403),
        ('org member', 403),
    ]
)
def test_post_webhook_key_jt(organization_factory, job_template_factory, post, user_role, expect):
    # POSTing to the webhook_key endpoint rotates (generates) the key;
    # only admin-level users may do so.
    objs = organization_factory("org", superusers=['admin'], users=['user'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template
    if user_role == 'superuser':
        user = objs.superusers.admin
    else:
        user = objs.users.user
        # Grant '<name>_role' on the org or the JT per the parametrized role.
        grant_obj = objs.organization if user_role.startswith('org') else jt
        getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
    url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk})
    response = post(url, {}, user=user, expect=expect)
    if expect < 400:
        # A non-empty key must have been generated.
        assert bool(response.data.get('webhook_key'))
@pytest.mark.django_db
@pytest.mark.parametrize(
    "user_role, expect", [
        ('superuser', 201),
        ('org admin', 201),
        ('jt admin', 201),
        ('jt execute', 403),
        ('org member', 403),
    ]
)
def test_post_webhook_key_wfjt(organization_factory, workflow_job_template_factory, post, user_role, expect):
    # Same as the JT variant: rotating a workflow JT's webhook key is
    # restricted to admin-level users.
    objs = organization_factory("org", superusers=['admin'], users=['user'])
    wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template
    if user_role == 'superuser':
        user = objs.superusers.admin
    else:
        user = objs.users.user
        # Grant '<name>_role' on the org or the WFJT per the parametrized role.
        grant_obj = objs.organization if user_role.startswith('org') else wfjt
        getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
    url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk})
    response = post(url, {}, user=user, expect=expect)
    if expect < 400:
        assert bool(response.data.get('webhook_key'))
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_service(organization_factory, job_template_factory, patch, service):
    # Enabling a webhook service auto-generates a webhook key.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert (jt.webhook_service, jt.webhook_key) == ('', '')
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    patch(url, {'webhook_service': service}, user=admin, expect=200)
    jt.refresh_from_db()
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_unset_webhook_service(organization_factory, job_template_factory, patch, service):
    # Disabling the webhook service also clears the generated key.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    patch(url, {'webhook_service': ''}, user=admin, expect=200)
    jt.refresh_from_db()
    assert (jt.webhook_service, jt.webhook_key) == ('', '')
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_credential(organization_factory, job_template_factory, patch, service):
    # A credential whose type matches the enabled webhook service is accepted.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    # e.g. 'github_token' / 'gitlab_token' managed credential type.
    cred_type = CredentialType.defaults['{}_token'.format(service)]()
    cred_type.save()
    cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
                                     inputs={'token': 'secret'})
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    patch(url, {'webhook_credential': cred.pk}, user=admin, expect=200)
    jt.refresh_from_db()
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    assert jt.webhook_credential == cred
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service,token", [
        # Pair each service with the *previous* service's token type
        # (index -1 wraps around), i.e. a deliberately mismatched credential.
        (s, WebhookTemplateMixin.SERVICES[i - 1][0]) for i, (s, _) in enumerate(WebhookTemplateMixin.SERVICES)
    ]
)
def test_set_wrong_service_webhook_credential(organization_factory, job_template_factory, patch, service, token):
    # A credential for a different webhook service must be rejected with 400.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    cred_type = CredentialType.defaults['{}_token'.format(token)]()
    cred_type.save()
    cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
                                     inputs={'token': 'secret'})
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    response = patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400)
    jt.refresh_from_db()
    # The JT must be left untouched by the failed PATCH.
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    assert jt.webhook_credential is None
    assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_credential_without_service(organization_factory, job_template_factory, patch, service):
    # Attaching a webhook credential with no webhook service enabled fails.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert jt.webhook_service == ''
    assert jt.webhook_key == ''
    cred_type = CredentialType.defaults['{}_token'.format(service)]()
    cred_type.save()
    cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
                                     inputs={'token': 'secret'})
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    response = patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400)
    jt.refresh_from_db()
    assert jt.webhook_service == ''
    assert jt.webhook_key == ''
    assert jt.webhook_credential is None
    assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
@pytest.mark.django_db
@pytest.mark.parametrize(
    "service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_unset_webhook_service_with_credential(organization_factory, job_template_factory, patch, service):
    # The service cannot be disabled while a matching credential is attached.
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
                              inventory='test_inv', project='test_proj').job_template
    admin = objs.superusers.admin
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    cred_type = CredentialType.defaults['{}_token'.format(service)]()
    cred_type.save()
    cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
                                     inputs={'token': 'secret'})
    jt.webhook_credential = cred
    jt.save()
    url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
    response = patch(url, {'webhook_service': ''}, user=admin, expect=400)
    jt.refresh_from_db()
    # Failed PATCH leaves service, key and credential in place.
    assert jt.webhook_service == service
    assert jt.webhook_key != ''
    assert jt.webhook_credential == cred
    assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
| |
from datetime import datetime
from functools import partial
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
from elasticutils import F
from freezegun import freeze_time
from demo_esutils.models import Category
from demo_esutils.models import Article
from demo_esutils.models import User
from demo_esutils.mappings import ArticleMappingType as M
from django_esutils.filters import ElasticutilsFilterSet
class BaseTest(TestCase):
    """Shared fixture/index setup for the elasticutils test cases.

    Loads the ``test_data`` fixture, exposes the two fixture users and the
    mapping type, and (re)builds the Elasticsearch index before each test.
    """

    fixtures = ['test_data']

    def setUp(self):
        super(BaseTest, self).setUp()
        self.louise = User.objects.get(pk=2)
        self.florent = User.objects.get(pk=1)
        # Fields the filter sets are allowed to search on (includes the
        # catch-all aliases 'q'/'s'/'trololo' used by test_filter_all).
        self.search_fields = [
            'author.username',
            'author.email',
            'category.id',
            'category.name',
            'created_at',
            'subject',
            'content',
            'status',
            'contributors',
            'library',
            'library.name',
            'library.number_of_books',
            'q',
            's',
            'trololo',
        ]
        self.mapping_type = M
        # Rebuild and flush the index so each test sees the fixture data.
        M.update_mapping()
        M.run_index_all()
        M.refresh_index()

    def tearDown(self):
        # Drop DB rows, then flush the index so deletions are visible.
        for model in (User, Category, Article):
            model.objects.all().delete()
        M.refresh_index()

    def freezed_time(self, *args):
        """Build a timezone-aware datetime from *args* in the current TZ."""
        return make_aware(datetime(*args), get_current_timezone())
class MappingTestCase(BaseTest):
    """Index lifecycle and query-DSL behaviour of ArticleMappingType."""

    def test_index(self):
        # keep previous indexed object count
        prev_count = M.count()
        # create an article
        category = Category.objects.create(name='The Tests')
        article1 = Article()
        article1.author = self.florent
        article1.category = category
        article1.content = '!'
        article1.subject = 'make it works'
        article2 = Article()
        article2.author = self.louise
        article2.category = category
        article2.content = 'yo'
        article2.subject = 'My amazing article'
        for i, art in enumerate([article1, article2]):
            # save
            art.save()
            # refresh index
            M.refresh_index()
            # check added: index count grows by one per saved article
            add_count = M.count()
            self.assertEqual(add_count, prev_count + i + 1)
        for i, a in enumerate([article1, article2]):
            # remove an article
            a.delete()
            # refresh index
            M.refresh_index()
            # check removed: index count shrinks by one per deletion
            del_count = M.count()
            self.assertEqual(del_count, add_count - i - 1)

    def test_queryset_update(self):
        # update some contents
        self.assertEqual(M.query(subject__prefix='amaz').count(), 1)
        Article.objects.filter(pk=3).update(subject='hey #tgif')
        # reindex all
        M.run_index_all()
        # refresh index
        M.refresh_index()
        # should
        self.assertEqual(M.query(subject__prefix='amaz').count(), 0)
        self.assertEqual(M.query(subject__match='#tgif').count(), 1)
        # update some contents
        self.assertEqual(M.query(content__term='yo').count(), 1)
        Article.objects.filter(pk=3).update(content='monday uh!')
        # refresh index
        M.refresh_index()
        self.assertEqual(M.query(content__term='yo').count(), 0)
        self.assertEqual(M.query(content__term='monday').count(), 1)

    def test_query_string(self):
        # Match: analyzed, case-insensitive full-word matching
        self.assertEqual(M.query(subject__match='WorkS').count(), 1)
        self.assertEqual(M.query(subject__match='works').count(), 1)
        self.assertEqual(M.query(subject__match='amaz').count(), 0)
        self.assertEqual(M.query(**{'author.username__match': 'Louise'}).count(), 2)  # noqa
        # Match phrase: whole phrase must match in order
        self.assertEqual(M.query(subject__match_phrase='make it ').count(), 1)
        self.assertEqual(M.query(subject__match_phrase='make i ').count(), 0)
        # Prefix
        self.assertEqual(M.query(subject__prefix='amaz').count(), 1)
        self.assertEqual(M.query(**{'author.username__prefix': 'lo'}).count(), 2)  # noqa
        self.assertEqual(M.query(**{'category.name__prefix': 'tes'}).count(), 2)  # noqa
        # Term: exact (analyzed) token match only
        self.assertEqual(M.query(**{'category.name__term': 'tes'}).count(), 0)
        self.assertEqual(M.query(**{'category.name__term': 'tests'}).count(), 2)  # noqa
        # Terms
        self.assertEqual(M.query(**{'category.name__terms': ['tests', 'category']}).count(), 3)  # noqa
        # in
        self.assertEqual(M.query(**{'category.name__in': ['tests', 'category']}).count(), 3)  # noqa

    @freeze_time('2014-10-16 16:19:20')
    def test_query_range(self):
        self.assertEqual(M.query(status__gt=0).count(), 3)
        self.assertEqual(M.query(status__gte=0).count(), 4)
        self.assertEqual(M.query(status__lt=2).count(), 2)
        self.assertEqual(M.query(status__lte=2).count(), 3)
        self.assertEqual(M.query(**{'status__gt': 1, 'status__lte': 3}).count(), 2)  # noqa
        self.assertEqual(M.query(**{'status__range': [1, 2]}).count(), 2)  # noqa
        # in
        self.assertEqual(M.query(**{'status__in': [1, 2]}).count(), 2)  # noqa
        # date range (clock frozen by the decorator for reproducibility)
        query_date = self.freezed_time(2014, 10, 16, 16, 19, 20)
        self.assertEqual(M.query(**{'created_at__lt': query_date}).count(), 1)

    @freeze_time('2014-10-17 16:19:20')
    def test_query_fuzzy(self):
        # http://elasticutils.readthedocs.org/en/latest/api.html?highlight=fuzzy # noqa
        #self.assertEqual(M.query(status__fuzzy=(1, 1)).count(), 3)
        query_date = self.freezed_time(2014, 10, 17, 16, 19, 20)
        self.assertEqual(M.query(created_at__fuzzy=(query_date, '1d')).count(), 2)  # noqa
        self.assertEqual(M.query(subject__fuzzy='works').count(), 1)  # noqa
        self.assertEqual(M.query(**{'category.name__fuzzy': 'tests'}).count(), 2)  # noqa

    def test_query_wild_card(self):
        # '?' matches exactly one character, '*' any run of characters.
        self.assertEqual(M.query(subject__wildcard='ma?e').count(), 1)
        self.assertEqual(M.query(subject__wildcard='a?ing').count(), 0)
        self.assertEqual(M.query(subject__wildcard='a*ing').count(), 1)
class FilterTestCase(BaseTest):
    """Exercise ElasticutilsFilterSet: term, prefix/startswith, nested,
    ids, missing-field, multi-field, range/in and catch-all filtering."""

    def test_filter_term_string(self):
        search_terms = {'subject': 'amazing'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        # Test formed filter
        subject_filter = filter_set.get_filter('subject', 'amazing').__repr__()
        self.assertEqual(F(**{'subject': 'amazing'}).__repr__(), subject_filter)  # noqa
        filtered_qs = filter_set.qs
        self.assertEqual(filtered_qs.count(), 1)

    def test_filter_prefix_or_startswith(self):
        # 'prefix' and its alias 'startswith' must behave identically,
        # whether supplied as the default action or per-field.
        default_action = 'prefix'
        search_terms = {'category.name': 'tes'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=default_action)
        self.assertEqual(filter_set.qs.count(), 2)
        search_actions = {'category.name': 'prefix'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        subject_filter = filter_set.get_filter('category.name', 'tes').__repr__()  # noqa
        self.assertEqual(F(**{'category.name__prefix': 'tes'}).__repr__(), subject_filter)  # noqa
        default_action = 'startswith'
        search_terms = {'category.name': 'tes'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=default_action)
        self.assertEqual(filter_set.qs.count(), 2)
        self.assertEqual(filter_set.count, 2)
        # Per-field action should override the default action.
        search_actions = {'category.name': 'startswith'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action='prefix')
        self.assertEqual(filter_set.qs.count(), 2)
        self.assertEqual(filter_set.count, 2)
        subject_filter = filter_set.get_filter('category.name', 'tes').__repr__()  # noqa
        self.assertEqual(F(**{'category.name__startswith': 'tes'}).__repr__(), subject_filter)  # noqa

    def test_filter_nested(self):
        # Nested list field: multiple values are combined (AND semantics).
        search_terms = {'contributors': ['louise']}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 2)
        filters = query.build_search()
        self.assertEqual(filters['filter'], filter_set._get_filter_nested_item('contributors', 'louise'))  # noqa
        search_terms = {'contributors': ['louise', 'florent']}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 1)

    def test_filter_ids(self):
        filter_set_func = partial(ElasticutilsFilterSet,
                                  search_fields=self.search_fields,
                                  search_actions=None,
                                  mapping_type=self.mapping_type,
                                  queryset=M.query(),
                                  default_action=None)
        filter_set = filter_set_func(search_terms={'ids': [1, 2]})
        query = filter_set.qs
        self.assertEqual(query.count(), 2)
        ids_filter = query.build_search()['filter']
        self.assertEqual(ids_filter, filter_set.get_filter_ids([1, 2]))
        # invalid list: empty string id matches nothing
        filter_set = filter_set_func(search_terms={'ids': ['']})
        query = filter_set.qs
        self.assertEqual(query.count(), 0)
        # invalid list: non-numeric id matches nothing
        filter_set = filter_set_func(search_terms={'ids': ['pouet']})
        query = filter_set.qs
        self.assertEqual(query.count(), 0)

    def test_filter_missing(self):
        # A None search term selects documents *missing* that field.
        search_terms = {'contributors': None}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 1)
        search_terms = {'library': None}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 2)
        search_terms = {'category.name': None}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 0)
        # Add an article with no category: it should now be the one match.
        article = Article()
        article.author = self.louise
        article.content = 'yo'
        article.subject = 'Article without cathegory'
        article.save()
        # refresh index
        M.refresh_index()
        search_terms = {'category.name': None}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 1)

    def test_filter_multiple_fields(self):
        # All per-field filters are combined; conflicting terms match nothing.
        search_terms = {'ids': [1, 2],
                        'contributors': ['louise', 'florent'],
                        'category.name': 'tes'}
        search_actions = {'category.name': 'startswith'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 0)
        ids_filter = query.build_search()['filter']
        search_terms = {'subject': 'amazing',
                        'category.name': 'tes'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(query.count(), 1)

    def test_filter_range_and_in(self):
        search_terms = {'status': 0}
        search_actions = {'status': 'gt'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 3)
        search_terms = {'status': 0}
        search_actions = {'status': 'gte'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 4)
        search_terms = {'status': 1}
        search_actions = {'status': 'lt'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 1)
        search_terms = {'status': 1}
        search_actions = {'status': 'lte'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 2)
        # range takes a [lower, upper] pair
        search_terms = {'status': [1, 2]}
        search_actions = {'status': 'range'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 2)
        # in
        search_terms = {'status': [1, 2]}
        search_actions = {'status': 'in'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=search_actions,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        self.assertEqual(filter_set.qs.count(), 2)

    def test_filter_all(self):
        # 'q' is the default catch-all key; all_filter renames it.
        search_terms = {'q': 'amaz'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None)
        query = filter_set.qs
        self.assertEqual(filter_set.qs.count(), 1)
        search_terms = {'s': 'amaz'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None,
                                           all_filter='s')
        query = filter_set.qs
        self.assertEqual(filter_set.qs.count(), 1)
        search_terms = {'trololo': 'amaz'}
        filter_set = ElasticutilsFilterSet(search_fields=self.search_fields,
                                           search_actions=None,
                                           search_terms=search_terms,
                                           mapping_type=self.mapping_type,
                                           queryset=M.query(),
                                           default_action=None,
                                           all_filter='trololo')
        query = filter_set.qs
        self.assertEqual(filter_set.qs.count(), 1)

    """
    def test_filter_distance(self):
        # TODO
    """
class ViewBackendTestCase(BaseTest):
    """End-to-end filtering through the REST list views."""

    def test_filter_on_inner_object(self):
        # Filter on nested document attributes via query-string params.
        response = self.client.get(reverse('rest_article_list')+'?library.name=library')  # noqa
        self.assertEqual(len(response.data), 2)
        response = self.client.get(reverse('rest_article_list')+'?library.id=1')  # noqa
        self.assertEqual(len(response.data), 1)

    def test_missing(self):
        # An empty list value means "field is missing".
        response = self.client.get(reverse('rest_article_list')+'?contributors[]=')  # noqa
        self.assertEqual(len(response.data), 1)

    def test_all_view(self):
        # Catch-all search via the default 'q' key and a renamed all_filter.
        response = self.client.get(reverse('rest_article_list')+'?q=amaz')
        self.assertEqual(len(response.data), 1)
        response = self.client.get(reverse('s_rest_list')+'?trololo=amaz')  # noqa
        self.assertEqual(len(response.data), 1)

    def test_all_exact_word(self):
        response = self.client.get(reverse('rest_article_list')+'?q=amazing')
        self.assertEqual(len(response.data), 1)
| |
import os
import ply.lex as lex
import ply.yacc as yacc
from .lexer import *
from .nodes import *
from . import report
# PLY: the grammar's start symbol.
start = "spec_file"
def push_parent(p, node):
    """Make *node* the current parent for subsequently-added AST nodes."""
    p.parser.parents.append(node)
def pop_parent(p):
    """Discard the most recently pushed parent node."""
    p.parser.parents.pop()
def add_node(p, node, index=1):
    # Attach *node* under the current (top-of-stack) parent: set its parent
    # and source Location, and append it to the parent's children.
    # *index* selects which grammar symbol's position supplies the column.
    parents = getattr(p.parser, "parents")
    top = parents[-1]
    node.parent = top
    node.location = Location(p.lexer.filename,
                             p.lexer.lineno,
                             find_column(p.lexer.lexdata, p, index))
    top.children.append(node)
    return node
# --- Expression / literal grammar rules -------------------------------------
# NOTE: each rule's docstring is a PLY grammar production; PLY parses it to
# build the parser tables, so the docstrings must not be edited casually.

def p_expr_list_first(p):
    ''' expr_list : expr
    '''
    # Seed the list with the first expression.
    p[0] = [p[1]]
    return p

def p_expr_list_rest(p):
    ''' expr_list : expr_list COMMA expr
    '''
    # Left-recursive accumulation.
    p[1].append(p[3])
    p[0] = p[1]
    return p

def p_list_literal(p):
    ''' list_literal : LBRACKET expr_list RBRACKET
    '''
    # Column position comes from symbol 2 (the expression list).
    p[0] = add_node(p, ListLiteral(p[2]), 2)
    return p

def p_list_literal_empty(p):
    ''' list_literal : LBRACKET RBRACKET
    '''
    p[0] = add_node(p, ListLiteral([]))
    return p

def p_literal_bool(p):
    ''' literal : BOOLEAN
    '''
    p[0] = add_node(p, BoolLiteral(p[1]))
    return p

def p_literal_int(p):
    ''' literal : INTEGER
    '''
    p[0] = add_node(p, IntLiteral(p[1]))
    return p

def p_literal_float(p):
    ''' literal : FLOAT
    '''
    p[0] = add_node(p, FloatLiteral(p[1]))
    return p

def p_literal_chr(p):
    ''' literal : CHRLIT
    '''
    p[0] = add_node(p, CharLiteral(p[1]))
    return p

def p_literal_str(p):
    ''' literal : STRLIT
    '''
    p[0] = add_node(p, StringLiteral(p[1]))
    return p

def p_literal_null(p):
    ''' literal : NULL
    '''
    p[0] = add_node(p, NullLiteral())
    return p

def p_literal_list(p):
    ''' literal : list_literal
    '''
    p[0] = p[1]
    return p

def p_expr(p):
    ''' expr : literal
    '''
    p[0] = p[1]
    return p

def p_expr_call(p):
    ''' expr : IDENT LPAREN RPAREN
    '''
    # Zero-argument call expression.
    p[0] = add_node(p, Call(p[1]))
    return p
def p_primitive_type(p):
    ''' primitive_type : BOOL
                       | FLOAT
                       | INT
                       | STRING
    '''
    # Pass the token's string value through.
    p[0] = p[1]
    return p

def p_data_type_prim(p):
    ''' data_type : primitive_type
    '''
    p[0] = add_node(p, PrimitiveType(p[1]))
    return p

def p_data_type_unresolved(p):
    ''' data_type : IDENT
    '''
    # A type name; resolved to a node/extern type after parsing.
    p[0] = add_node(p, UnresolvedType(p[1]))
    return p

def p_option_decl(p):
    ''' option_decl : IDENT
    '''
    # The value is filled in by p_option; meanwhile the Option node becomes
    # the current parent so its value expression nests beneath it.
    p[0] = add_node(p, Option(p[1], None))
    push_parent(p, p[0])
    return p

def p_option(p):
    ''' option : option_decl COLON expr SEMICOLON
    '''
    p[0] = p[1]
    p[0].value = p[3]
    pop_parent(p)  # balances the push in p_option_decl
    return p

def p_option_list_first(p):
    ''' option_list : option
    '''
    p[0] = [p[1]]
    return p

def p_option_list_rest(p):
    ''' option_list : option_list option
    '''
    p[1].append(p[2])
    p[0] = p[1]
    return p

def p_extern(p):
    ''' extern : EXTERN IDENT LBRACE option_list RBRACE
    '''
    p[0] = add_node(p, ExternTypeDef(p[2], p[4]))
    # Re-parent the parsed options under the extern definition.
    for opt in p[4]:
        opt.parent = p[0]
        p[0].children.append(opt)
    return p

def p_target_item_option(p):
    ''' target_item : option
    '''
    p[0] = p[1]
    return p

def p_target_item_extern(p):
    ''' target_item : extern
    '''
    p[0] = p[1]
    return p

def p_target_item_list_first(p):
    ''' target_item_list : target_item
    '''
    p[0] = [p[1]]
    return p

def p_target_item_list_rest(p):
    ''' target_item_list : target_item_list target_item
    '''
    p[1].append(p[2])
    p[0] = p[1]
    return p
def p_target_decl(p):
    ''' target_decl : TARGET IDENT
    '''
    p[0] = add_node(p, Target(p[2]))
    push_parent(p, p[0])  # popped in p_target
    return p

def p_target(p):
    ''' target : target_decl LBRACE target_item_list RBRACE
    '''
    p[0] = p[1]
    # Sort items into the Target's option/extern buckets.
    for item in p[3]:
        if isinstance(item, Option):
            p[0].options.append(item)
        elif isinstance(item, ExternTypeDef):
            p[0].externs.append(item)
        else:  # should be/is disallowed by parsing rules
            report.error("unexpected %s in codegen target, " % item.__class__.__name__ +
                         "only options and extern types are allowed", item.location)
        p[0].children.append(item)
        item.parent = p[0]
    pop_parent(p)
    return p

def p_visitor_decl(p):
    ''' visitor_decl : VISITOR IDENT
    '''
    p[0] = add_node(p, Visitor(p[2]))
    push_parent(p, p[0])  # popped when the visitor body is reduced
    return p

def p_visitor(p):
    ''' visitor : visitor_decl LBRACE option_list RBRACE
    '''
    p[0] = p[1]
    p[0].options = p[3]
    pop_parent(p)
    return p
def p_visitor_empty(p):
    ''' visitor : visitor_decl LBRACE RBRACE
    '''
    p[0] = p[1]
    # Bug fix: visitor_decl pushed the Visitor as current parent and only the
    # non-empty rule (p_visitor) popped it. Without this pop an empty visitor
    # block leaves the parent stack unbalanced, so every subsequent top-level
    # node gets attached beneath this visitor.
    pop_parent(p)
    return p
def p_root(p):
    ''' root : ROOT IDent SEMICOLON
    '''
    # RootSpec's type starts unresolved; resolve_root_spec fixes it up later.
    p[0] = add_node(p, RootSpec(None))
    push_parent(p, p[0])
    p[0].type = add_node(p, UnresolvedType(p[2]), 2)
    pop_parent(p)
    return p

def p_field_specifier(p):
    ''' field_specifier : WEAK
                        | LIST
    '''
    p[0] = p[1]
    return p

def p_field_specifier_list_first(p):
    ''' field_specifier_list : field_specifier
    '''
    p[0] = [p[1]]
    return p

def p_field_specifier_list_rest(p):
    ''' field_specifier_list : field_specifier_list field_specifier
    '''
    p[1].append(p[2])
    p[0] = p[1]
    return p

def p_field_decl(p):
    ''' field_decl : IDENT
    '''
    # The type is attached later by p_fields, which owns the shared field_type.
    p[0] = add_node(p, Field(type=None, name=p[1]))
    return p

def p_field_decl_init(p):
    ''' field_decl : IDENT EQUAL expr
    '''
    # Field declaration with a default-value initializer.
    p[0] = add_node(p, Field(type=None, name=p[1], default=p[3]))
    push_parent(p, p[0])
    add_node(p, p[3])  # re-parent the default expression under the field
    pop_parent(p)
    return p

def p_field_decl_list_first(p):
    ''' field_decl_list : field_decl
    '''
    p[0] = [p[1]]
    return p

def p_field_decl_list_rest(p):
    ''' field_decl_list : field_decl_list COMMA field_decl
    '''
    p[1].append(p[3])
    p[0] = p[1]
    return p

def p_field_type(p):
    ''' field_type : field_specifier_list data_type
    '''
    is_weak = True if "weak" in p[1] else False
    is_list = True if "list" in p[1] else False
    p[0] = add_node(p, FieldType(p[2], is_weak=is_weak))
    push_parent(p, p[0])
    if is_list:
        # Wrap the element type in a ListElementType node.
        let = add_node(p, ListElementType(None, is_weak=is_weak))
        push_parent(p, let)
        let.type = add_node(p, p[2], 2)
        pop_parent(p)
        p[0].type = let
    else:
        p[0].type = add_node(p, p[2], 2)
    pop_parent(p)
    return p

def p_field_type_no_spec(p):
    ''' field_type : data_type
    '''
    p[0] = add_node(p, FieldType(p[1]))
    return p

def p_fields(p):
    ''' fields : field_type field_decl_list SEMICOLON
    '''
    # One declaration line may declare several fields sharing one type.
    p[0] = []
    for field in p[2]:
        push_parent(p, field)
        field.type = add_node(p, p[1])
        pop_parent(p)
        p[0].append(field)
    return p

def p_arg_list_first(p):
    ''' arg_list : IDENT
    '''
    p[0] = [p[1]]
    return p

def p_arg_list_rest(p):
    ''' arg_list : arg_list COMMA IDENT
    '''
    p[1].append(p[3])
    p[0] = p[1]
    return p
def p_ctor_ident(p):
    ''' ctor_ident : IDENT
    '''
    # The Constructor becomes the current parent; the matching pop happens
    # when the enclosing ctor production is reduced.
    p[0] = add_node(p, Constructor(p[1], []))
    push_parent(p, p[0])
def p_ctor_no_args(p):
    ''' ctor : ctor_ident LPAREN RPAREN SEMICOLON
    '''
    p[0] = p[1]
    # Bug fix: ctor_ident pushed the Constructor as current parent, and only
    # p_ctor_with_args popped it. A zero-argument constructor therefore left
    # the parent stack unbalanced and later nodes were attached beneath it.
    pop_parent(p)
    return p
def p_ctor_with_args(p):
    ''' ctor : ctor_ident LPAREN arg_list RPAREN SEMICOLON
    '''
    p[0] = p[1]
    p[0].args = p[3]
    pop_parent(p)  # balances the push in p_ctor_ident
    return p

def p_node_specifier(p):
    ''' node_specifier : ABSTRACT
    '''
    p[0] = p[1]
    return p

def p_node_specifier_list_first(p):
    ''' node_specifier_list : node_specifier
    '''
    p[0] = [p[1]]
    return p
def p_node_specifier_list_rest(p):
    ''' node_specifier_list : node_specifier_list node_specifier
    '''
    p[1].append(p[2])
    # Bug fix: the accumulated list was never assigned to p[0], so this rule
    # yielded None and p_node_declarator's `"abstract" in p[1]` raised a
    # TypeError whenever a node used more than one specifier. Every other
    # *_list_rest rule in this grammar sets p[0] = p[1].
    p[0] = p[1]
    return p
def p_node_base(p):
    ''' node_base : COLON IDENT
    '''
    # Unresolved reference to the base node type (resolved after parsing).
    p[0] = add_node(p, UnresolvedType(p[2], is_weak=True), 2)
    return p

def p_node_item_fields(p):
    ''' node_item : fields
    '''
    # Note: the `fields` production yields a *list* of Field nodes.
    p[0] = p[1]
    return p

def p_node_item_ctor(p):
    ''' node_item : ctor
    '''
    p[0] = p[1]
    return p

def p_node_item_list_first(p):
    ''' node_item_list : node_item
    '''
    p[0] = [p[1]]
    return p

def p_node_item_list_rest(p):
    ''' node_item_list : node_item_list node_item
    '''
    p[1].append(p[2])
    p[0] = p[1]
    return p

def p_node_type(p):
    ''' node_type : node_specifier_list NODE
    '''
    # Pass the specifier list (e.g. ["abstract"]) through.
    p[0] = p[1]
    return p

def p_node_type_no_spec(p):
    ''' node_type : NODE
    '''
    p[0] = []
    return p

def p_node_declarator(p):
    ''' node_declarator : node_type IDENT
    '''
    abstract = True if "abstract" in p[1] else False
    p[0] = add_node(p, Node(p[2], None, is_abstract=abstract))
    push_parent(p, p[0])  # popped when the node body is reduced
    return p

def p_node_block(p):
    ''' node_block : LBRACE node_item_list RBRACE
    '''
    p[0] = p[2]
    return p

def p_node_block_empty(p):
    ''' node_block : LBRACE RBRACE
    '''
    p[0] = []
    return p
def p_node(p):
    ''' node : node_declarator node_block
    '''
    p[0] = p[1]
    for item in p[2]:
        if isinstance(item, Field):
            p[0].fields.append(item)
        elif isinstance(item, Constructor):
            p[0].ctrs.append(item)
        elif all(isinstance(e, Field) for e in item):
            # A `fields` item is a list of Field nodes; splice it in.
            p[0].fields.extend(item)
        else:  # should be/is disallowed by parsing rules
            report.error("unexpected item in %s in node " % item.__class__.__name__ +
                         "definition, only Fields and Constructors are allowed")
    p[0].base = None
    pop_parent(p)
    return p

def p_node_with_base(p):
    ''' node : node_declarator node_base node_block
    '''
    p[0] = p[1]
    for item in p[3]:
        if isinstance(item, Field):
            p[0].fields.append(item)
        elif isinstance(item, Constructor):
            p[0].ctrs.append(item)
        elif all(isinstance(e, Field) for e in item):
            p[0].fields.extend(item)
        else:  # should be/is disallowed by parsing rules
            report.error("unexpected item in %s in node " % item.__class__.__name__ +
                         "definition, only Fields and Constructors are allowed")
    p[0].base = p[2]  # still an UnresolvedType here; resolved after parsing
    pop_parent(p)
    return p
# Top-level items of a spec file: targets, visitors, the root declaration
# and node definitions.

def p_spec_file_item_target(p):
    ''' spec_file_item : target
    '''
    p[0] = p[1]
    return p

def p_spec_file_item_visitor(p):
    ''' spec_file_item : visitor
    '''
    p[0] = p[1]
    return p

def p_spec_file_item_root(p):
    ''' spec_file_item : root
    '''
    p[0] = p[1]
    return p

def p_spec_file_item_node_spec(p):
    ''' spec_file_item : node
    '''
    p[0] = p[1]
    return p

def p_spec_file_item_list_first(p):
    ''' spec_file_item_list : spec_file_item
    '''
    p[0] = [p[1]]
    return p

def p_spec_file_item_list_rest(p):
    ''' spec_file_item_list : spec_file_item_list spec_file_item
    '''
    p[1].append(p[2])
    p[0] = p[1]
    return p
#
# FIXME: move this type resolution stuff into a separate module/NodeVisitor class
#
def find_extern_types(spec, types):
    """Collect extern type declarations from all codegen targets into *types*.

    Several targets may declare the same extern name; they are merged into a
    single ExternType whose target_types lists every per-target declaration.
    """
    for target in spec.targets:
        for extern in target.externs:
            existing = types.get(extern.name)
            if existing is None:
                types[extern.name] = ExternType(extern.name, [extern])
            else:
                existing.target_types.append(extern)
def find_node_types(spec, types):
    # Register every parsed node type by name, rejecting duplicates.
    for node in spec.nodes:
        if node.name not in types:
            types[node.name] = node
        else:
            # Report non-fatally first so the follow-up note (fatal=True)
            # can point at the earlier definition before aborting.
            report.error("duplicate node type %s" % node.name, fatal=False, location=node.location)
            report.note("previous definition was here", fatal=True, location=types[node.name].location)
def resolve_node_fields(node, types):
for field in node.fields:
tp = field.type.type
if isinstance(tp, UnresolvedType):
if tp.name in types:
field.type.type = types[field.type.type.name]
else:
report.error("unresolved field type %s" % tp.name, field.location)
def resolve_node_base(node, types):
    """Swap an unresolved base-type reference for the real node type."""
    base = node.base
    if not isinstance(base, UnresolvedType):
        return
    if base.name in types:
        node.base = types[base.name]
    else:
        report.error("unresolved base node type %s" % base.name, base.location)
def resolve_node_types(spec, types):
    """Resolve field types and the base type for every node in the spec."""
    for nd in spec.nodes:
        resolve_node_fields(nd, types)
        resolve_node_base(nd, types)
def resolve_root_spec(spec, types):
    """Resolve the root declaration's node type, if a root was given."""
    root = spec.root
    if root and isinstance(root.type, UnresolvedType):
        try:
            root.type = types[root.type.name]
        except KeyError:
            report.error("unresolved root node type %s" % root.type.name, root.location)
def resolve_list_types(spec, types):
    """Resolve the element type of list-typed fields.

    List fields wrap their element one level deeper: field.type.type is a
    ListElementType whose .type is the element type, hence the extra level
    of indirection below.
    """
    for node in spec.nodes:
        for field in node.fields:
            if not isinstance(field.type.type, ListElementType):
                continue
            elem = field.type.type.type
            if isinstance(elem, UnresolvedType):
                if elem.name in types:
                    field.type.type.type = types[elem.name]
                else:
                    # BUG FIX: report the unresolved element's own name.
                    # The old code read .name off the ListElementType
                    # wrapper (field.type.type.name) instead of the
                    # element type, so the message named the wrong thing.
                    report.error("unresolved list node type %s" % elem.name, field.type.location)
def resolve_types(spec):
    # Two phases, in a fixed order: first collect every known type name
    # (externs, then nodes — node duplicates are reported), then rewrite
    # UnresolvedType placeholders everywhere they can occur: node fields,
    # node bases, the root spec, and list element types.
    types = {}
    find_extern_types(spec, types)
    find_node_types(spec, types)
    resolve_node_types(spec, types)
    resolve_root_spec(spec, types)
    resolve_list_types(spec, types)
    return types
def p_spec_file(p):
    ''' spec_file : spec_file_item_list
    '''
    # The SpecFile object was seeded as parents[0] in parse(); sort the
    # parsed top-level items into its per-kind collections.
    p[0] = getattr(p.parser, "parents")[0]
    for item in p[1]:
        if isinstance(item, Target):
            p[0].targets.append(item)
        elif isinstance(item, Visitor):
            p[0].visitors.append(item)
        elif isinstance(item, RootSpec):
            p[0].root = item
        elif isinstance(item, Node):
            p[0].nodes.append(item)
    # Every declaration has been seen now, so forward references can be
    # resolved.
    p[0].types = resolve_types(p[0])
    return p
def p_error(t):
    # PLY syntax-error hook: pinpoint the offending token in the input.
    # NOTE(review): PLY calls p_error(None) on unexpected EOF, which would
    # raise AttributeError here — confirm inputs always fail mid-stream.
    location = Location(t.lexer.filename, t.lexer.lineno, find_column(t.lexer.lexdata, t))
    report.error('invalid syntax', location)
def parse(file, filename, debug=True):
    """Lex and parse a spec, returning the populated SpecFile.

    *file* is an open file object; when None, the file at *filename* is
    opened and read instead. *filename* is always used for error
    locations. With debug true, PLY emits its debug output; otherwise
    its logging is silenced.
    """
    if debug:
        lexer = lex.lex(debug=True)
    else:
        lexer = lex.lex(debug=False, errorlog=lex.NullLogger())
    if file is not None:
        source = file.read()
    else:
        with open(filename, 'r') as f:
            source = f.read()
    lexer.input(source)
    lexer.filename = filename
    if debug:
        parser = yacc.yacc(debug=True)
    else:
        parser = yacc.yacc(debug=False, errorlog=yacc.NullLogger())
    # Seed the parent stack with the root SpecFile; grammar actions push
    # and pop scopes on parser.parents while reducing.
    root = SpecFile(filename)
    root.parent = None
    parser.parents = [root]
    # parser.parse() returns p[0] of the start symbol, i.e. the SpecFile.
    return parser.parse(lexer=lexer, tracking=True)
| |
#!/usr/bin/python
# Copyright (c) 2006-2013, 2015 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Usage:
#
# $ ./make_new_branch.py --help
#
# Also:
#
# $ ./make_new_branch.py |& tee 2012.08.03.make_new_branch.txt
#
'''
# 2012.08.08: Making leafy branch is finally pretty quick and easy!
# # Script completed in 11.93 mins.
./make_new_branch.py \
-U landonb --no-password \
--new-branch-name 'Metc Bikeways 2012' \
--last-merge-rid 14124 \
--callback-class 'MetC_Bikeways_Defs'
./make_new_branch.py \
-U landonb --no-password \
--new-branch-name 'Metc Bikeways 2017' \
--last-merge-rid 14124 \
--callback-class MetC_Bikeways_Defs \
--tile-skins bikeways \
--owners landonb landonb \
--arbiters landonb mekhyl torre \
--editors terveen landonb mekhyl torre
/* 2014.07.02: Add individual member: */
-- psql -U cycling ccpv3_live
BEGIN TRANSACTION;
SELECT cp_group_membership_new(
cp_user_id('someone'), -- IN user_id_ INTEGER
'someone', -- IN username_ TEXT
cp_branch_baseline_id(), -- IN branch_baseline_id INTEGER
1, -- IN rid_beg INTEGER
cp_rid_inf(), -- IN rid_inf INTEGER
cp_group_shared_id('Metc Bikeways 2012 Editors'),
-- IN group_id_ INTEGER
cp_access_level_id('editor')); -- IN access_level_id_ INTEGER
COMMIT;
# And update the reports file:
$ cd /ccp/dev/cycloplan_live/htdocs/reports
$ htpasswd .htpasswd 'someone'
'''
# Script identity; ArgParser_Script passes script_name/script_version to
# Ccp_Script_Args (see its __init__ below).
script_name = ('Make New Branch')
script_version = '1.0'
__version__ = script_version
__author__ = 'Cyclopath <info@cyclopath.org>'
__date__ = '2012-08-03'
# ***
# SYNC_ME: Search: Scripts: Load pyserver.
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../util'
% (os.path.abspath(os.curdir),)))
import pyserver_glue
import conf
import g
import logging
from util_ import logging2
from util_.console import Console
log_level = logging.DEBUG
#log_level = logging2.VERBOSE2
#log_level = logging2.VERBOSE4
#log_level = logging2.VERBOSE
conf.init_logging(True, True, Console.getTerminalSize()[0]-1, log_level)
log = g.log.getLogger('make_new_branch')
# ***
import copy
from decimal import Decimal
import gc
import psycopg2
import socket
import time
import traceback
from grax.access_infer import Access_Infer
from grax.access_level import Access_Level
from grax.access_scope import Access_Scope
from grax.access_style import Access_Style
from grax.grac_manager import Grac_Manager
from grax.item_manager import Item_Manager
from grax.user import User
from gwis.exception.gwis_warning import GWIS_Warning
from gwis.query_branch import Query_Branch
from item import item_base
from item import item_versioned
from item import link_value
from item.attc import attribute
from item.feat import branch
from item.feat import byway
from item.feat import node_endpoint
from item.feat import node_byway
from item.feat import node_traverse
from item.feat import route
from item.grac import group
from item.grac import group_membership
from item.link import link_attribute
from item.link import link_tag
from item.util import ratings
from item.util import revision
from item.util.item_type import Item_Type
from util_ import db_glue
from util_ import geometry
from util_ import gml
from util_ import misc
from util_.log_progger import Debug_Progress_Logger
from util_.script_args import Ccp_Script_Args
from util_.script_base import Ccp_Script_Base
from new_item_policy_init import New_Item_Policy_Init
from node_cache_maker import Node_Cache_Maker
# *** Debug switches

# Progress logger shared with long-running loops; flip debug_break_loops
# (and the loop-count knobs) to bail out of loops early while testing.
debug_prog_log = Debug_Progress_Logger()
debug_prog_log.debug_break_loops = False
#debug_prog_log.debug_break_loops = True
#debug_prog_log.debug_break_loop_cnt = 3
##debug_prog_log.debug_break_loop_cnt = 10

# When set, go_main() raises just before committing, so the transaction
# rolls back and the database is left untouched.
debug_skip_commit = False
#debug_skip_commit = True

# This is shorthand for if one of the above is set.
debugging_enabled = ( False
                      or debug_prog_log.debug_break_loops
                      or debug_skip_commit
                      )
# *** Cli arg. parser
class ArgParser_Script(Ccp_Script_Args):
    """Command-line parser for make_new_branch.py.

    Extends the standard Cyclopath script arguments with the options that
    select and configure the branch operation (create, update, or purge).
    """

    #
    def __init__(self):
        Ccp_Script_Args.__init__(self, script_name, script_version)
        #
        # Without an explicit --group, fall back on the Public group.
        self.groups_none_use_public = True

    #
    def prepare(self):
        """Register this script's command-line options."""
        Ccp_Script_Args.prepare(self)
        # Desired operation.
        self.add_argument('--update-branch', dest='update_branch',
            action='store', default=None, type=str,
            help='the name or ID of the existing branch to update')
        #
        self.add_argument('--new-branch-name', dest='new_branch_name',
            action='store', default='', type=str,
            help='the name of the new branch')
        #
        self.add_argument('--purge-branch', dest='purge_branch',
            action='store_true', default=False,
            help='delete all traces of the branch instead of creating it')
        # For new branches.
        #
        # Either:
        self.add_argument('--last-merge-rid', dest='last_merge_rid',
            action='store', default=0, type=int,
            help='the last merge revision ID, or Current if not specified')
        # or:
        self.add_argument('--is-basemap', dest='is_basemap',
            action='store_true', default=False,
            help='make a parenty branch rather than a leafy branch')
        # For new and existing branches.
        #
        self.add_argument('--callback-class', dest='callback_class',
            action='store', default='', type=str,
            help='classname of the branches module w/ process_import/_export')
        #
        self.add_argument('--tile-skins', dest='tile_skins',
            # Note: default is None, not [], so that --tile-skins all alone means
            # to clear list of skins, so no tiles will be generated.
            action='store', default=None, type=str, nargs='*',
            help='a list of skins to use to make tiles for branch')
        #
        self.add_argument('--owners', dest='owners',
            action='store', default=[], type=str, nargs='*',
            help='a list of usernames to add to branch owners group')
        self.add_argument('--arbiters', dest='arbiters',
            action='store', default=[], type=str, nargs='*',
            help='a list of usernames to add to branch arbiters group')
        self.add_argument('--editors', dest='editors',
            action='store', default=[], type=str, nargs='*',
            help='a list of usernames to add to branch editors group')
        #
        # FIXME: Implement: Create new database from schema dump and populate
        #        standard tables.
        """
        self.add_argument('--install-db', dest='install_db',
            action='store', default=False, type=str,
            help='install a fresh Cyclopath instead to the named database')
        # FIXME: Implement:
        self.add_argument('--user-owner', dest='user_owner',
            action='store', default=False, type=str,
            help='create the named user and make a basemap owner')
        """

    #
    def verify_handler(self):
        """Cross-validate the parsed options; returns False on conflicts."""
        ok = Ccp_Script_Args.verify_handler(self)
        # Require a real user: memberships and revisions are attributed.
        if self.cli_opts.username == conf.anonymous_username:
            log.error('Please specify a real username (no anonymous cowards).')
            ok = False
        # Exactly one of the three operations must be requested.
        op_count = ( (1 if (self.cli_opts.update_branch is not None) else 0)
                     + (1 if (self.cli_opts.new_branch_name) else 0)
                     + (1 if (self.cli_opts.purge_branch) else 0))
        if op_count != 1:
            log.error(
                'Specify one: --update-branch, --new-branch-name, or --purge-branch.')
            ok = False
        if self.cli_opts.purge_branch:
            if not self.cli_opts.branch:
                log.error('Please use --branch to specify the branch to purge.')
                ok = False
        # The two ways of choosing the branch-off revision are exclusive.
        if self.cli_opts.last_merge_rid and self.cli_opts.is_basemap:
            log.error('Please specify either --last-merge-rid or --is-basemap.')
            ok = False
        return ok
# *** Make_New_Branch

class Make_New_Branch(Ccp_Script_Base):
    """Creates, updates, or purges a Cyclopath branch (see go_main)."""

    # Fixed attribute set; a typo'd assignment raises AttributeError.
    __slots__ = (
        'the_branch',
        'owner_group',
        'arbiter_group',
        'editor_group',
        'sid_owners',
        'sid_arbiters',
        'sid_editors',
        'current_qb',
    )

    # *** Constructor

    def __init__(self):
        Ccp_Script_Base.__init__(self, ArgParser_Script)
        #
        # The branch item (branch.One) being created or updated.
        self.the_branch = None
        # The branch group.One() object.
        self.owner_group = None
        self.arbiter_group = None
        self.editor_group = None
        # The branch group stack IDs.
        self.sid_owners = 0
        self.sid_arbiters = 0
        self.sid_editors = 0
        #
        # A qb clone pinned to revision.Current (see reset_current_qb).
        self.current_qb = None
# ***

# This script's main() is very simple: it makes one of these objects and
# calls go(). Our base class reads the user's command line arguments and
# creates a query_builder object for us at self.qb before thunking to
# go_main().

#
def go_main(self):
    """Perform the requested branch operation inside one revision/commit.

    Everything runs under the exclusive revision lock; on any exception
    the transaction is rolled back (do_commit stays False).
    """
    # Skipping: Ccp_Script_Base.go_main(self)
    do_commit = False
    try:
        if self.cli_opts.update_branch is None:
            if not self.cli_opts.last_merge_rid:
                # Make sure we're being run from a terminal.
                # Run from cron, $TERM is not set. Run from bash, it's 'xterm'.
                if ((os.environ.get('TERM') != "dumb")
                    and (os.environ.get('TERM') is not None)):
                    print '\nPlease confirm the last_merge_rid.\n'
                    self.cli_opts.last_merge_rid = self.ask_question(
                        'last_merge_rid',
                        revision.Revision.revision_max(self.qb.db),
                        the_type=int)
                # else, not interactive and no --last-merge-rid, so we'll
                # just use the new revision ID that's claimed when
                # we create the branch.
        log.debug('go_main: getting exclusive revision lock...')
        revision.Revision.revision_lock_dance(
            self.qb.db, caller='make_new_branch.py')
        g.assurt((self.qb.locked_tables == ['revision',])
                 or (self.qb.cp_maint_lock_owner))
        log.debug('go_main: database is locked.')
        # MAYBE: There seems to be an awful lot of boilerplate code here.
        self.qb.grac_mgr = Grac_Manager()
        self.qb.grac_mgr.prepare_mgr('user', self.qb)
        # The script should be run by a real developer-user.
        g.assurt(self.qb.username
                 and (self.qb.username != conf.anonymous_username))
        self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                      self.qb.username)
        # Get a new revision ID.
        self.qb.item_mgr.start_new_revision(self.qb.db)
        log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))
        #import pdb;pdb.set_trace()
        # fixme: is there a way to setup the basemap for new Ccp installs? or
        # in general? like, we do not need to clone node IDs... or, okay,
        # there are two cases: one is for CcpV1->V2, like, what script to call
        # after running SQL scripts; second case is fresh Ccp installs, after
        # users initializes database, they will have to create groups and
        # branch and memberships and nips...
        #
        # Dispatch on the requested operation; verify_handler guarantees
        # exactly one of the three was specified.
        if self.cli_opts.update_branch is not None:
            self.update_branch()
            self.load_groups()
            self.add_members()
            self.init_newips()
        elif self.cli_opts.new_branch_name:
            self.make_groups()
            self.make_branch()
            self.add_members()
            self.init_newips()
            self.clone_nodes()
        else:
            g.assurt(self.cli_opts.purge_branch)
            self.purge_branch()
        # FIXME: Is this correct? Or should username be _script name?
        # Save the new revision and finalize the sequence numbers.
        group_names_or_ids = ['Public',]
        changenote = ('%s branch "%s"'
                      % ('Updated' if (self.cli_opts.update_branch is not None)
                         else 'Created new',
                         self.qb.branch_hier[0][2],))
        self.finish_script_save_revision(group_names_or_ids,
                                         self.qb.username,
                                         changenote)
        if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
        do_commit = True
    except Exception, e:
        log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
    finally:
        # Commit-or-rollback and release the revision lock.
        self.cli_args.close_query(do_commit)
# ***

#
def query_builder_prepare(self):
    # Configure the shared query builder. This script does not edit
    # geometry, so raw and SVG geometry are skipped; skip_geometry_wkt
    # stays False (presumably WKT is still wanted somewhere — confirm).
    Ccp_Script_Base.query_builder_prepare(self)
    self.qb.filters.skip_geometry_raw = True
    self.qb.filters.skip_geometry_svg = True
    self.qb.filters.skip_geometry_wkt = False
    #
    #revision.Revision.revision_lock_dance(
    #   self.qb.db, caller='make_new_branch.py')
# ***

#
def purge_branch(self):
    """Confirm with the operator, then irreversibly delete the branch."""
    log.info('Purging all traces of the branch!')
    # This fcn. is Very Destructive. Double-check that the user isn't
    # being dumb.
    confirmed = self.ask_yes_no(
        'Are you really, really, REALLY sure you want to do this?')
    if not confirmed:
        return
    self.really_purge_branch_really()
#
def really_purge_branch_really(self):
    """Delete every row belonging to the branch from all branchy tables.

    Assumes the operator already confirmed (see purge_branch). Acquires
    the revision lock first so nobody edits while rows disappear.
    """
    time_0 = time.time()
    log.info('For real purging all traces of the branch!')
    log.warning('FIXME: This fcn. is not tested all that well.')
    log.debug('purge_branch_: Acquiring revision table lock...')
    revision.Revision.revision_lock_dance(
        self.qb.db, caller='make_new_branch.py')
    # MAYBE: 2012.08.03: This code brute-forces the removal. It's from
    # some copy-n-paste code [lb] has been using (i.e., from a psql
    # command line) so it's not all that well tested. One concern is that
    # there might be violations of foreign key constraints...
    tables = [
        #
        'new_item_policy',
        #
        'node_byway',
        'node_endpoint',
        #
        # FIXME: Delete from route_id where route_id...
        'route',
        #
        'attribute',
        'tag', # Should be empty for leafy branches, anyway...
        'post',
        'thread',
        'annotation',
        # BUG FIX: 'attribute' was listed a second time here, issuing a
        # redundant (no-op) DELETE; it is already purged above.
        'attachment',
        'geofeature',
        'link_value',
        #
        'tag_preference',
        #
        'merge_job',
        'route_analysis_job',
        # FIXME: Delete from work_item_step where work_item_id...
        'work_item',
        #
        # MAYBE: delete from item_event_read where item_id = ...
        # MAYBE: delete from item_event_alert
        #
        'gtfsdb_cache_links',
        'gtfsdb_cache_register',
        #
        'group_revision',
        'group_membership',
        'group_',
        #
        'group_item_access',
        # FIXME: Delete from branch_conflict where branch_system_id...
        'branch',
        'item_versioned',
        #
        # MAYBE:
        #  delete from revert_event where rid_reverting/rid_victim
        # 'revision',
        #
        # MAYBE:
        #  delete from track_point where track_id...
        # 'track',
        #
        # MAYBE: tilecache tables...
        ]
    for table in tables:
        self.qb.db.sql("DELETE FROM %s WHERE branch_id = %d"
                       % (table, self.qb.branch_hier[0][0],))
    log.debug('purge_branch_: delete from %d tables in %s'
              % (len(tables),
                 misc.time_format_elapsed(time_0),))
# ***

#
def load_groups(self):
    """Look up the branch's Owners/Arbiters/Editors group stack IDs.

    Used on --update-branch, where the groups already exist. Raises if
    any of the three specially-named groups cannot be resolved.
    """
    log.info('Loading groups for branch.')
    # MAYBE: Should this be a group or branch fcn, making the special group
    # names? For now, just getting the basename...
    group_basename = self.the_branch.branch_groups_basename()
    # The three stanzas below were copy-pasted; factored into a helper.
    self.sid_owners = self.load_groups_resolve(group_basename, 'Owners')
    self.sid_arbiters = self.load_groups_resolve(group_basename, 'Arbiters')
    self.sid_editors = self.load_groups_resolve(group_basename, 'Editors')
    # Skipping: "All Users", i.e., the Public user group.

#
def load_groups_resolve(self, group_basename, which):
    """Resolve one '<basename> <which>' group and return its stack ID."""
    special_name = '%s %s' % (group_basename, which,)
    group_id, group_name = group.Many.group_resolve(self.qb.db, special_name)
    if (not group_id) or (not group_name):
        raise Exception('Group named "%s" not found.' % (special_name,))
    return group_id
# ***

#
def make_groups(self):
    """Create the Owners/Arbiters/Editors groups for a new branch.

    Each group is built from the same template row (mutated in place via
    update) and saved by make_groups_group; the permanent stack IDs land
    in self.sid_owners / sid_arbiters / sid_editors.
    """
    log.info('Making groups for new branch.')
    # make same-named-as-branch group
    # also make _Arbiter: group (and make making that name a fcn in the group
    # or branch class)
    # FIXME: Check that group names do not exist (probably do same for branch
    #        name, too).
    # FIXME: We don't use this group, really, I think all the branchy users
    #        are Arbiters, at least that's how it's setup... and individual
    #        users get editor access to the branch, anyway, right? Argh...
    common_row = {
        # From item_versioned
        'system_id' : None, # assigned later
        'branch_id' : None, # assigned later
        'version' : 0,
        'deleted' : False,
        'reverted' : False,
        # MAGIC_NUMBER: Starting at rid 1.
        'valid_start_rid' : 1,
        'valid_until_rid' : None,
        # From groupy_base
        #'group_id' : new_group_id,
        #'group_name' : self.cli_opts.new_branch_name,
        # From group
        'access_scope_id' : Access_Scope.shared,
    }
    # FIXME: Make sure these groups do not already exist.
    common_row.update({
        'stack_id' : self.qb.item_mgr.get_next_client_id(),
        'name' : '%s Owners' % self.cli_opts.new_branch_name,
        'description' : '%s Owners' % (self.cli_opts.new_branch_name,),
    })
    self.owner_group = group.One(qb=self.qb, row=common_row)
    self.make_groups_group(self.owner_group)
    self.sid_owners = self.owner_group.stack_id
    common_row.update({
        'stack_id' : self.qb.item_mgr.get_next_client_id(),
        'name' : '%s Arbiters' % self.cli_opts.new_branch_name,
        'description' : '%s Arbiters' % (self.cli_opts.new_branch_name,),
    })
    self.arbiter_group = group.One(qb=self.qb, row=common_row)
    self.make_groups_group(self.arbiter_group)
    self.sid_arbiters = self.arbiter_group.stack_id
    common_row.update({
        'stack_id' : self.qb.item_mgr.get_next_client_id(),
        'name' : '%s Editors' % self.cli_opts.new_branch_name,
        'description' : '%s Editors' % (self.cli_opts.new_branch_name,),
    })
    self.editor_group = group.One(qb=self.qb, row=common_row)
    self.make_groups_group(self.editor_group)
    self.sid_editors = self.editor_group.stack_id
#
def make_groups_group(self, the_group):
    """Finalize and save one freshly-created branch group.

    *the_group* was built with a client-side (negative) stack ID from
    get_next_client_id(); stack_id_correct() swaps in a permanent ID, so
    the client ID is remembered first to keep the item-cache mapping.
    """
    # BUG FIX: this read 'self.stack_id', but Make_New_Branch defines no
    # such attribute (and __slots__ forbids it) — AttributeError at
    # runtime. The fresh item's own stack ID is what distinguishes a
    # client-assigned (negative) ID here; cf. add_members_save_mmbrship.
    if the_group.stack_id < 0:
        client_id = the_group.stack_id
    else:
        client_id = None
    the_group.stack_id_correct(self.qb)
    g.assurt(the_group.fresh)
    log.debug('make_groups_group: clearing item_cache')
    self.qb.item_mgr.item_cache_reset()
    self.qb.item_mgr.item_cache_add(the_group, client_id)
    prepared = self.qb.grac_mgr.prepare_item(self.qb,
        the_group, Access_Level.editor, ref_item=None)
    g.assurt(prepared)
    the_group.version_finalize_and_increment(self.qb,
        self.qb.item_mgr.rid_new)
    the_group.save(self.qb, self.qb.item_mgr.rid_new)
# ***

#
def get_branch_callbacks(self):
    """Build the import/export callback specs from --callback-class.

    The callback_class is, e.g., 'MetC_Bikeways_Defs'. E.g.,
      merge.branches.metc_bikeways_defs:MetC_Bikeways_Defs:process_import
      merge.branches.metc_bikeways_defs:MetC_Bikeways_Defs:process_export
    Returns ('', '') when no callback class was given.
    """
    import_callback = ''
    export_callback = ''
    if self.cli_opts.callback_class:
        cls_name = self.cli_opts.callback_class
        module_path = 'merge.branches.%s' % (cls_name.lower(),)
        import_callback = '%s:%s:process_import' % (module_path, cls_name,)
        export_callback = '%s:%s:process_export' % (module_path, cls_name,)
    return import_callback, export_callback
#
def get_tile_skin_names(self):
    """Return the comma-joined --tile-skins list, or None for no tiles."""
    # We default to _not_ rastering raster tiles.
    if not self.cli_opts.tile_skins:
        return None
    for skin_name in self.cli_opts.tile_skins:
        # A comma inside a skin name would corrupt the joined list.
        g.assurt(',' not in skin_name)
    return ','.join(self.cli_opts.tile_skins)
# ***

#
def update_branch(self):
    """Load an existing branch and overwrite its callbacks/tile skins.

    Saves a new version of the branch item at the revision claimed in
    go_main() and leaves the loaded item in self.the_branch.
    """
    #branch_id = branch.Many.public_branch_id(self.qb)
    #revision_id = self.qb.item_mgr.rid_new
    #rev = revision.Historic(revision_id, allow_deleted=False)
    rev = revision.Current()
    (branch_id, branch_hier) = branch.Many.branch_id_resolve(self.qb.db,
        self.cli_opts.update_branch, branch_hier_rev=rev)
    # Be sure to get the item_stack table or, when we save, access_style_id
    # won't be set and our not null constraint will complain.
    self.qb.filters.include_item_stack = True
    branches = branch.Many()
    branches.search_by_stack_id(branch_id, self.qb)
    if len(branches) != 1:
        raise Exception('Branch named "%s" not found.'
                        % (self.cli_opts.update_branch,))
    g.assurt(len(branches) == 1)
    the_branch = branches[0]
    # Currently, all branches are 'permissive'.
    g.assurt(the_branch.access_style_id == Access_Style.permissive)
    #
    # Only overwrite the attributes the operator actually supplied.
    import_callback, export_callback = self.get_branch_callbacks()
    if ((import_callback or export_callback)
        or (self.cli_opts.tile_skins is not None)):
        if import_callback or export_callback:
            log.debug('Overwriting import_callback: was: "%s" / now: "%s"'
                      % (the_branch.import_callback, import_callback,))
            the_branch.import_callback = import_callback
            log.debug('Overwriting export_callback: was: "%s" / now: "%s"'
                      % (the_branch.export_callback, export_callback,))
            the_branch.export_callback = export_callback
        if self.cli_opts.tile_skins is not None:
            tile_skins = self.get_tile_skin_names()
            log.debug('Overwriting tile_skins: was: "%s" / now: "%s"'
                      % (the_branch.tile_skins, tile_skins,))
            the_branch.tile_skins = tile_skins
    # MAYBE: Call prepare_and_save_item? Or just do it ourselves?
    # NOTE: grac_mgr.prepare_existing_from_stack_id calls
    #       validize, which calls groups_access_load_from_db...
    #       which we just call ourselves. I think this works.
    is_new_item = False
    the_branch.validize(self.qb, is_new_item,
                        item_base.One.dirty_reason_item_user,
                        ref_item=None)
    rid_new = self.qb.item_mgr.rid_new
    the_branch.version_finalize_and_increment(self.qb, rid_new)
    the_branch.save(self.qb, rid_new)
    self.the_branch = the_branch
# ***

#
def make_branch(self):
    """Create the new branch item and repoint self.qb at it.

    Grants owner access to the script user and the new Owners group,
    arbiter/editor access to the other new groups, and denies the Public.
    """
    # FIXME: If user specifies --branch, we should make the new branch
    #        descend from the specified branch.
    import_callback, export_callback = self.get_branch_callbacks()
    tile_skins = self.get_tile_skin_names()
    # FIXME: Ensure that name is unique! I.e., check the new branch name and
    #        check that the required group names are available.
    if self.cli_opts.is_basemap:
        parent_id = None
    else:
        parent_id = self.qb.branch_hier[0][0]
    # Without --last-merge-rid, branch off the revision claimed in go_main.
    last_merge_rid = self.cli_opts.last_merge_rid or self.qb.item_mgr.rid_new
    log.info('Making new branch: "%s" / parent_id: %s / last_merge_rid: %s'
             % (self.cli_opts.new_branch_name, parent_id, last_merge_rid,))
    new_branch = branch.One(
        qb=self.qb,
        row={
            # item_versioned
            'system_id' : None, # assigned later
            'branch_id' : None, # assigned later
            'stack_id' : self.qb.item_mgr.get_next_client_id(),
            'version' : 0,
            'deleted' : False,
            'reverted' : False,
            'name' : self.cli_opts.new_branch_name,
            'valid_start_rid' : None,
            'valid_until_rid' : None,
            # branch
            'parent_id' : parent_id,
            'last_merge_rid' : last_merge_rid,
            'conflicts_resolved' : True,
            'import_callback' : import_callback,
            'export_callback' : export_callback,
            'tile_skins' : tile_skins,
            # Skipping: coverage_area. See gen_tilecache_cfg.py.
        }
    )
    # Make the user who's running the script the branch owner.
    # And give the new branch groups access, too.
    g.assurt(self.qb.user_group_id)
    # Also make an entry for the Public, so it's easy to set this.
    # MAYBE: Maybe these are the groups and there are no other choices?
    #        That would really simply the client and other operations
    #        involving editing GIA records...
    pub_grp_id = group.Many.public_group_id(self.qb.db)
    #
    target_groups = {
        self.qb.user_group_id : Access_Level.owner,
        self.sid_editors : Access_Level.editor,
        self.sid_arbiters : Access_Level.arbiter,
        self.sid_owners : Access_Level.owner,
        pub_grp_id : Access_Level.denied,
    }
    # All branches are access-style 'permissive'. Cyclopath might some day
    # support, e.g., 'usr_choice', on sub-branches, i.e., so an agency can
    # let their users create their own branches. But right now, since
    # branches are pretty specially wired into the system and require special
    # scripts to setup and maintain, we'll just stick with 'permissive'.
    new_branch.access_style_id = Access_Style.permissive
    # access_infer_id is set from item_stack.save_core.get_access_infer().
    # NOTE: We set the valid_start_rid to 1 so that the branch is viewable
    #       at historic revisions, i.e., that the user sees what the parent
    #       branch looked like back then. If we didn't do this, it makes some
    #       operations fail, i.e., valid_start_rid cannot be greater than
    #       last_merge_rid, so importing fails if the user cannot see the
    #       branch at the last_merge_rid.
    # Skipping: self.qb.item_mgr.rid_new.
    first_rid = 1
    new_branch.prepare_and_save_item(self.qb,
                                     target_groups=target_groups,
                                     rid_new=first_rid,
                                     ref_item=None)
    log.info('Created branch: %s (%d)'
             % (new_branch.name, new_branch.stack_id,))
    # Make the branch_hier.
    revision_id = self.qb.item_mgr.rid_new
    rev = revision.Historic(revision_id, allow_deleted=False)
    (branch_id, branch_hier) = branch.Many.branch_id_resolve(self.qb.db,
        new_branch.stack_id, branch_hier_rev=rev)
    self.qb.branch_hier_set(branch_hier)
    # not needed: self.qb.revision = branch_hier[0][1]
    # MEH: Set self.cli_args.branch_hier? No one should be using branch_hier
    #      from cli_args, so, why bother... let the assurts fly instead.
    # Whatever: self.cli_args.branch_hier = branch_hier
    self.the_branch = new_branch
# ***

#
def add_members(self):
    """Create memberships for the --owners/--arbiters/--editors users.

    Memberships nest: owners are also added to the arbiters and editors
    groups, and arbiters also to the editors group.
    """
    log.info('Adding branch user group memberships.')
    # MAYBE: Use this script to update the public basemap after the
    #        v1-v2 upgrade? i.e., don't create group memberships in the
    #        upgrade scripts!
    # Group Memberships are saved with the basemap branch ID. We don't need
    # to clone the db, and if we did, we'd want to relock the 'revision'
    # table (i.e., grac_mgr expects: db.locked_tables == ['revision',]).
    basemap_qb = self.qb.clone(db_clone=False)
    # Use the basemap branch. At the Current revision.
    parentest = basemap_qb.branch_hier[-1]
    branch_hier = [(parentest[0], revision.Current(), parentest[2],),]
    basemap_qb.branch_hier_set(branch_hier)
    # Template row, reused (updated in place) for every membership saved.
    common_row = {
        # From item_versioned
        'system_id' : None, # assigned later
        'branch_id' : None, # assigned later
        'version' : 0,
        'deleted' : False,
        'reverted' : False,
        'name' : '',
        'valid_start_rid' : 1,
        'valid_until_rid' : None,
        # From groupy_base
        #'group_name' : None,#self.cli_opts.new_branch_name,
        # From group_membership
        'opt_out' : False,
        #'group_desc' : '',
        #'group_scope' : Access_Scope.shared,
        'access_level_id' : Access_Level.editor,
    }
    usernames = list(set(self.cli_opts.editors
                         + self.cli_opts.arbiters
                         + self.cli_opts.owners))
    self.add_members_to_group(basemap_qb, common_row,
                              self.sid_editors, usernames)
    usernames = list(set(self.cli_opts.arbiters
                         + self.cli_opts.owners))
    self.add_members_to_group(basemap_qb, common_row,
                              self.sid_arbiters, usernames)
    usernames = list(set(self.cli_opts.owners))
    self.add_members_to_group(basemap_qb, common_row,
                              self.sid_owners, usernames)
#
def add_members_to_group(self, basemap_qb, common_row,
                         group_sid, usernames):
    """Add each named user to the group, skipping existing members."""
    log.debug('add_members_to_group: group_sid: %d.' % (group_sid,))
    # Index current memberships by user ID so re-runs are idempotent.
    grp_mmbs = group_membership.Many()
    grp_mmbs.search_by_group_id(basemap_qb, group_sid)
    group_uids = {}
    for gm in grp_mmbs:
        group_uids[gm.user_id] = gm
    for uname in usernames:
        try:
            user_id = User.user_id_from_username(basemap_qb.db, uname)
        except GWIS_Warning, e:
            # Unknown usernames are skipped with a warning, not fatal.
            user_id = None
            log.warning('add_members_to_group: no such user: %s' % (uname,))
        if user_id:
            if not (user_id in group_uids):
                common_row.update({
                    'stack_id' : basemap_qb.item_mgr.get_next_client_id(),
                    'group_id' : group_sid,
                    'user_id' : user_id,
                    'username' : uname,
                })
                new_mmbrship = group_membership.One(qb=basemap_qb,
                                                    row=common_row)
                self.add_members_save_mmbrship(basemap_qb, new_mmbrship)
            else:
                existing_gm = group_uids[user_id]
                g.assurt(existing_gm.access_level_id == Access_Level.editor)
                log.info('add_members: user already member: %s in %s'
                         % (existing_gm.username, existing_gm.group_name,))
#
def add_members_save_mmbrship(self, basemap_qb, new_mmbrship):
    """Finalize and save one freshly-built group_membership item."""
    # See also: cp_group_membership_new.
    # BUG FIX: this read 'self.stack_id', which Make_New_Branch never
    # defines (and __slots__ forbids) — AttributeError at runtime. The
    # client ID that matters is the membership's own freshly-assigned
    # (negative) stack ID from get_next_client_id().
    if new_mmbrship.stack_id < 0:
        client_id = new_mmbrship.stack_id
    else:
        client_id = None
    new_mmbrship.stack_id_correct(basemap_qb)
    g.assurt(new_mmbrship.fresh)
    log.debug('add_members_save_mmbrship: clearing item_cache')
    basemap_qb.item_mgr.item_cache_reset()
    basemap_qb.item_mgr.item_cache_add(new_mmbrship, client_id)
    prepared = basemap_qb.grac_mgr.prepare_item(basemap_qb,
        new_mmbrship, Access_Level.editor, ref_item=None)
    g.assurt(prepared)
    new_mmbrship.version_finalize_and_increment(basemap_qb,
        basemap_qb.item_mgr.rid_new)
    new_mmbrship.save(basemap_qb, basemap_qb.item_mgr.rid_new)
# ***

#
def reset_current_qb(self):
    """Clone self.qb with its leaf branch pinned to revision.Current()."""
    cloned_qb = self.qb.clone(db_clone=False)
    hier = copy.copy(cloned_qb.branch_hier)
    # branch_hier entries are (stack_id, revision, name) triples.
    leaf_id, _leaf_rev, leaf_name = hier[0]
    hier[0] = (leaf_id, revision.Current(), leaf_name,)
    cloned_qb.branch_hier_set(hier)
    self.current_qb = cloned_qb
#
def init_newips(self):
    # Install the standard new-item-policy records for the branch, using a
    # qb pinned to the Current revision (see reset_current_qb).
    log.info('Initializing new item policies.')
    self.reset_current_qb()
    nipi = New_Item_Policy_Init()
    nipi.install_nips(self.current_qb, policy_profile='standard')
# ***

#
def clone_nodes(self):
    """Copy node cache data from the parent branch via Node_Cache_Maker.

    Recomputing the node cache can take an hour (see the notes at the
    bottom of this file); a new leafy branch starts identical to its
    parent, so the parent's rows are copied instead.
    """
    log.info('Cloning node data from parent branch.')
    # HACK: Is this cheating? Whatever, it works fine... maybe it's just
    #       clever.
    # Create an instance of the node maker script but set it up sneakily and
    # don't call its go_main.
    ncm = Node_Cache_Maker()
    ncm.cli_args = ncm.argparser()
    # This doesn't work: ncm.cli_opts = ncm.cli_args.get_opts()
    g.assurt(self.current_qb is not None)
    ncm.qb = self.current_qb
    # Note that ncm.cli_args.qb does not exist because we haven't triggered
    # ncm.query_builder_prepare().
    ncm.cli_args.branch_id = self.qb.branch_hier[0][0]
    # MEH: propagate debug_prog_log to ncm; for now, you can just edit its
    #      debug_prog_log.
    if not self.cli_opts.is_basemap:
        # Don't call create_tables:
        #   NO: ncm.create_tables()
        #       self.reset_current_qb()
        #       ncm.qb = self.current_qb
        # FIXME/CAVEAT: If your new branch has an old last_merge_rid, using
        #   quick_nodes probably doesn't make sense.
        #   Should we check last_merge_rid and maybe do full node
        #   rebuild? Or let the caller call node_cache_maker.py?
        #   See upgrade_ccpv1-v2.sh: it calls make_new_branch.py
        #   and then node_cache_maker.py...
        ncm.quick_nodes()
        # This one takes a five minutes...
        ncm.add_internals()
    else:
        # If a basemap, there is no geometry yet, so no nodes.
        # But if this is a rebuild of the public basemap after a V1->V2
        # upgrade...
        # FIXME: How do you detect this? Expecting new_branch_name and not
        #        using --branch=0 so maybe cli_opts needs a tweaking.
        # FIXME: Implement this if...
        if False:
            ncm.make_nodes()
            ncm.add_internals()
            ncm.update_route()
# ***

# ***

if (__name__ == '__main__'):
    # Script entry point: go() (from Ccp_Script_Base) parses arguments,
    # builds self.qb, and thunks to go_main() above.
    make_new_branch = Make_New_Branch()
    make_new_branch.go()

# ***
#
# 2012.08.03: [lb]: These are the raw commands from a copy and paste file I've
# been using up until creating the make_new_branch.py script. You *should* (if
# these comments are kept current) be able to run the commands below to achieve
# what's being done above. You'll find these commands useful not just for unit
# testing, but also if you need to recreate part of a branch, i.e., maybe you
# already have a branch but you want to redo the node_endpoint table, well, the
# examples below show you how to call the node_cache_maker script for the
# basemap and branchier branches.
"""
# *** The basics.
export pyserver=$cp/pyserver
export dest_db=ccpv2
export runic=$cp/scripts/setupcp
# Restart Apache.
# E.g., sudo service httpd restart
re
# *** The one-time public basemap scripts you gotta run.
# If you haven't done so already, build the node tables.
# Circa 2012.08.01:
# --populate-nodes 133005 loops took 47.48 mins. (+5 minutes for commit)
# --add-internals 154942 loops took 2.84 mins.
# --update-route 102581 loops took 0.39 mins.
cd $runic
./node_cache_maker.py --create-tables
./node_cache_maker.py --branch 0 \
--populate-nodes --add-internals --update-route
# If you haven't done so already, create the new item policies for the basemap.
# Skipping: echo "DELETE FROM minnesota.new_item_policy;" \
# | psql -U postgres -d $dest_db --no-psqlrc
cd $runic
./new_item_policy_init.py \
-U landonb --no-password \
-b 0 \
--profile='standard' \
-m "Create new item policies for basemap."
# If you haven't done so already, add the merge_job callbacks for the basemap.
cd $pyserver
./ccp.py -U landonb --no-password \
-u -t branch \
-b 0 \
-m "Add import callback to branch." \
-e import_callback \
"merge.branches.metc_bikeways_defs:Public_Basemap_Defs:process_import" \
-e export_callback \
"merge.branches.metc_bikeways_defs:Public_Basemap_Defs:process_export"
# *** Make the new branch.
# Add new branch
cd $pyserver
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-c -t branch \
-m "Make Branch: Metc Bikeways 2012" \
-e name "Metc Bikeways 2012" \
-e last_merge_rid 14124
# It can take upwards of an hour to *calculate* the node table data, but since
# a new branch has the same data as its parent, we can just copy it, which
# should take only a matter of minutes.
# Skipping:
# ./node_cache_maker.py --branch "Metc Bikeways 2012" \
# --populate-nodes --add-internals --update-route
cd $runic
./node_cache_maker.py --branch "Metc Bikeways 2012" \
--quick-nodes --purge-rows
./node_cache_maker.py --branch "Metc Bikeways 2012" \
--add-internals --update-route
# 2012.08.08: The branch is already populated, so we need to rebuild...
cd $runic
./node_cache_maker.py --branch "Metc Bikeways 2012" \
--populate-nodes --add-internals --update-route
# Make a group for the branch.
# NOTE: Use valid_start_rid = 1, so users can see
# revisions before the current revision (i.e.,
# the current revision is > last_merge_rid, so
# user's wouldn't be able to import because they
# can't see last_merge_rid). We could just set
# valid_start_rid to last_merge_rid, but that's
# also restrictive, so we set it to 1; in most
# cases, the parent branch is the public basemap,
# so this is appropriate, and for other cases, we'll
# still check the user's access to the parent branch.
# 2012.08.13: Deprecated. See well-known group names, below.
#cd $pyserver
#./ccp.py -U landonb --no-password \
# -f request_is_a_test 0 \
# -c -t group \
# -m "Make Group: Metc Bikeways 2012" \
# -e name "Metc Bikeways 2012" \
# -e access_scope_id 2 \
# -e valid_start_rid 1 \
# -e description "Metc Bikeways 2012"
# Make a group_item_access record to give the new group editor access to the
# new branch.
# FIXME: This makes just one new GIA record; implement acl_grouping?
# 2012.08.13: Deprecated. See well-known group names, below.
#cd $pyserver
#./ccp.py -U landonb --no-password \
# -f request_is_a_test 0 \
# -u -t branch \
# -b "Metc Bikeways 2012" \
# -f filter_by_text_exact "Metc Bikeways 2012" \
# -m "Grant Access: Metc Bikeways 2012 - Group <=> Branch." \
# --gia group_name "Metc Bikeways 2012" \
# --gia access_level_id 3 \
# --gia valid_start_rid 1
# 2012.08.08: Adding well-known group names.
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-c -t group \
-m "Make Group: Metc Bikeways 2012 Arbiters" \
-e name "Metc Bikeways 2012 Arbiters" \
-e access_scope_id 2 \
-e valid_start_rid 1 \
-e description "Metc Bikeways 2012 Arbiters"
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-c -t group \
-m "Make Group: Metc Bikeways 2012 Editors" \
-e name "Metc Bikeways 2012 Editors" \
-e access_scope_id 2 \
-e valid_start_rid 1 \
-e description "Metc Bikeways 2012 Editors"
#
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-u -t branch \
-b "Metc Bikeways 2012" \
-f filter_by_text_exact "landonb" \
-m "Grant Branch Access: landonb" \
--gia group_name "landonb" \
--gia access_level_id 1 \
--gia valid_start_rid 1
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-u -t branch \
-b "Metc Bikeways 2012" \
-f filter_by_text_exact "Metc Bikeways 2012 Arbiters" \
-m "Grant Branch Access: Metc Bikeways 2012 Arbiters" \
--gia group_name "Metc Bikeways 2012 Arbiters" \
--gia access_level_id 2 \
--gia valid_start_rid 1
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-u -t branch \
-b "Metc Bikeways 2012" \
-f filter_by_text_exact "Metc Bikeways 2012 Editors" \
-m "Grant Branch Access: Metc Bikeways 2012 Editors" \
--gia group_name "Metc Bikeways 2012 Editors" \
--gia access_level_id 3 \
--gia valid_start_rid 1
# *** Setup the new branch's new item policy.
cd $runic
./new_item_policy_init.py \
-U landonb --no-password \
-b "Metc Bikeways 2012" \
--profile='standard' \
-m "Create new item policies for branch."
# *** Add the merge_job callbacks.
cd $pyserver
./ccp.py -U landonb --no-password \
-u -t branch \
-b "Metc Bikeways 2012" \
-f filter_by_text_exact "Metc Bikeways 2012" \
-m "Add import callback to branch." \
-e import_callback \
"merge.branches.metc_bikeways_defs:MetC_Bikeways_Defs:process_import" \
-e export_callback \
"merge.branches.metc_bikeways_defs:MetC_Bikeways_Defs:process_export"
# *** Add users to the new group.
# FIXME: group_membership adds duplicates without checking first...
# so
# DELETE FROM group_membership where group_id = (
# SELECT stack_id FROM group_
# WHERE name = 'Metc Bikeways 2012' AND access_scope_id = 2
# );
# SELECT * FROM group_membership
# WHERE group_id = (SELECT stack_id FROM group_
# WHERE name = 'Metc Bikeways 2012' AND access_scope_id = 2);
## Add owners to the MetC group.
#cd $pyserver
#for uname in \
# "landonb" \
# "mludwig" \
# ; do
# ./ccp.py -U landonb --no-password \
# -f request_is_a_test 0 \
# -c -t group_membership \
# -m "Add users to group: Metc Bikeways 2012 Owners." \
# -e name "" \
# -e access_level_id 1 \
# -e valid_start_rid 1 \
# -e opt_out 0 \
# -e username ${uname} \
# -e group_name "Metc Bikeways 2012 Owners"
#done
# Add arbiters to the MetC group.
cd $pyserver
for uname in \
"jane" \
"john" \
; do
./ccp.py -U landonb --no-password \
-f request_is_a_test 0 \
-c -t group_membership \
-m "Add users to group: Metc Bikeways 2012 Arbiters." \
-e name "" \
-e access_level_id 2 \
-e valid_start_rid 1 \
-e opt_out 0 \
-e username ${uname} \
-e group_name "Metc Bikeways 2012 Arbiters"
done
#########################################################################
# FIXME: What about building tiles for the branch?
# What about setting up cron jobs or whatnots?
# 2012.08.08: This is old code from the cut-n-paste file but I haven't built
# tiles in a long, long time.
# This does not work. Trying to oversudo myself, apparently. =)
# sudo -u www-data \
# INSTANCE=minnesota nohup \
# ./tilecache_update.py -N -A -L -Z | tee tcupdate.txt 2>&1 &
# Make sure apache can write to our file.
# FIXME: File path. Should be cp/?
touch $cp_dev/mapserver/tcupdate.txt
chmod 666 $cp_dev/mapserver/tcupdate.txt
# NOTE: You cannot sudo -u ... nohup, so make 'em separate operations.
# FIXME: make this $httpd_user
sudo su - www-data
cd $cp_dev/mapserver
INSTANCE=minnesota nohup ./tilecache_update.py -N -A -L -Z \
| tee tcupdate.txt 2>&1 &
INSTANCE=minnesota nohup ./tilecache_update.py -a \
   | tee tcupdate.txt 2>&1 &
"""
| |
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sippy.SipConf import SipConf
try:
from urllib import quote, unquote
except ImportError:
from urllib.parse import quote, unquote
RFC3261_USER_UNRESERVED = '&=+$,;?/#'
# Quote from RFC-3261:
# Several rules are incorporated from RFC 2396 [5] but are updated to
# make them compliant with RFC 2234
RFC3261_MARK = '-_.!~*\'()'
USERNAME_SAFE = RFC3261_USER_UNRESERVED + RFC3261_MARK
class SipURL(object):
    """A parsed SIP URI (RFC 3261, section 19.1).

    Instances can be built either by parsing a URI string or by supplying
    the individual components as keyword arguments.  str()/localStr()
    serialize the instance back into a URI string.  tel: URIs are
    converted into sip: form when SipConf.autoconvert_tel_url is enabled.
    """
    # Class-level defaults for every URI component; __init__() rebinds
    # the mutable ones (other, userparams) so instances never share them.
    scheme = None
    username = None
    userparams = None
    password = None
    host = None
    port = None
    headers = None
    usertype = None
    transport = None
    ttl = None
    maddr = None
    method = None
    tag = None
    other = None
    lr = False

    def __init__(self, url = None, username = None, password = None, host = None, port = None, headers = None, \
      usertype = None, transport = None, ttl = None, maddr = None, method = None, tag = None, other = None, \
      userparams = None, lr = False, relaxedparser = False, scheme = "sip"):
        """Parse *url* if given, otherwise assemble the URI from parts.

        When building from parts and *host* is omitted, the URI is
        addressed at this node (SipConf.my_address / SipConf.my_port).
        Raises ValueError for unsupported schemes.
        """
        self.original_uri = url
        self.other = []
        self.userparams = []
        if url is None:
            self.scheme = scheme
            self.username = username
            if userparams is not None:
                self.userparams = userparams
            self.password = password
            if host is None:
                # No host given: address the URI at ourselves.
                self.host = SipConf.my_address
                self.port = SipConf.my_port
            else:
                self.host = host
                self.port = port
            self.headers = headers
            self.usertype = usertype
            self.transport = transport
            self.ttl = ttl
            self.maddr = maddr
            self.method = method
            self.tag = tag
            if other is not None:
                self.other = other
            self.lr = lr
            return
        parts = url.split(':', 1)
        if len(parts) < 2:
            # scheme is missing, assume sip:
            parts.insert(0, 'sip')
        parts[0] = parts[0].lower()
        if parts[0] not in ('sip', 'sips', 'tel'):
            raise ValueError('unsupported scheme: %s:' % parts[0])
        self.scheme, url = parts
        if self.scheme == 'tel':
            if SipConf.autoconvert_tel_url:
                self.convertTelURL(url, relaxedparser)
            else:
                raise ValueError('tel: scheme is not supported')
        else:
            self.parseSipURL(url, relaxedparser)

    def convertTelURL(self, url, relaxedparser):
        """Re-interpret the body of a tel: URI as a sip: URI.

        The phone number becomes the username; the host defaults to our
        own address (or stays empty when relaxedparser is enabled).
        """
        self.scheme = 'sip'
        if relaxedparser:
            self.host = ''
        else:
            self.host = SipConf.my_address
            self.port = SipConf.my_port
        parts = url.split(';')
        self.username = unquote(parts[0])
        if len(parts) > 1:
            # parse userparams
            self.userparams = []
            for part in parts[1:]:
                # The RFC-3261 suggests the user parameter keys should
                # be converted to lower case.
                k, v = part.split('=')
                self.userparams.append(k.lower() + '=' + v)

    def parseSipURL(self, url, relaxedparser):
        """Parse everything after the 'sip:'/'sips:' scheme prefix."""
        # ear: index just past the '@' separator, or 0 when there is no
        # user part.
        ear = url.find('@') + 1
        parts = url[ear:].split(';')
        userdomain, params = url[0:ear] + parts[0], parts[1:]
        if len(params) == 0 and '?' in userdomain[ear:]:
            # No URI parameters: headers are glued straight onto hostport.
            self.headers = {}
            userdomain_suff, headers = userdomain[ear:].split('?', 1)
            userdomain = userdomain[:ear] + userdomain_suff
            for header in headers.split('&'):
                k, v = header.split('=')
                self.headers[k] = unquote(v)
        if ear > 0:
            userpass = userdomain[:ear - 1]
            hostport = userdomain[ear:]
            upparts = userpass.split(':', 1)
            if len(upparts) > 1:
                self.password = upparts[1]
            uparts = upparts[0].split(';')
            if len(uparts) > 1:
                self.userparams = uparts[1:]
            self.username = unquote(uparts[0])
        else:
            hostport = userdomain
        parseport = None
        if relaxedparser and len(hostport) == 0:
            self.host = ''
        elif hostport[0] == '[':
            # IPv6 host: the square brackets stay part of self.host.
            hpparts = hostport.split(']', 1)
            self.host = hpparts[0] + ']'
            if len(hpparts[1]) > 0:
                hpparts = hpparts[1].split(':', 1)
                if len(hpparts) > 1:
                    parseport = hpparts[1]
        else:
            # IPv4 host or hostname
            hpparts = hostport.split(':', 1)
            if len(hpparts) == 1:
                self.host = hpparts[0]
            else:
                self.host = hpparts[0]
                parseport = hpparts[1]
        if parseport is not None:
            try:
                self.port = int(parseport)
            # Narrowed from "except Exception": int() signals bad input
            # with ValueError only.
            except ValueError:
                # Can't parse port number, check why
                port = parseport.strip()
                if len(port) == 0:
                    # Bug on the other side, work around it
                    print('WARNING: non-compliant URI detected, empty port number, ' \
                      'assuming default: "%s"' % str(self.original_uri))
                elif port.find(':') > 0:
                    pparts = port.split(':', 1)
                    if pparts[0] == pparts[1]:
                        # Bug on the other side, work around it
                        print('WARNING: non-compliant URI detected, duplicate port number, ' \
                          'taking "%s": %s' % (pparts[0], str(self.original_uri)))
                        self.port = int(pparts[0])
                    else:
                        # Bare raise preserves the original traceback
                        # (was "raise e", which rewrites it).
                        raise
                else:
                    raise
        if len(params) > 0:
            # Split trailing headers ("...;param?hdr=val&...") off the
            # last parameter before applying the parameter list.
            last_param = params[-1]
            arr = last_param.split('?', 1)
            params[-1] = arr[0]
            self.setParams(params)
            if len(arr) == 2:
                self.headers = {}
                for header in arr[1].split('&'):
                    k, v = header.split('=')
                    self.headers[k] = unquote(v)

    def setParams(self, params):
        """Reset all URI parameters from a list of 'name=value' strings.

        Known parameters land in their own attributes; anything
        unrecognized is preserved verbatim in self.other.
        """
        self.usertype = None
        self.transport = None
        self.ttl = None
        self.maddr = None
        self.method = None
        self.tag = None
        self.other = []
        self.lr = False
        for p in params:
            nv = p.split('=', 1)
            if len(nv) == 1:
                if p == 'lr':
                    self.lr = True
                else:
                    self.other.append(p)
                continue
            name, value = nv
            if name == 'user':
                self.usertype = value
            elif name == 'transport':
                self.transport = value
            elif name == 'ttl':
                self.ttl = int(value)
            elif name == 'maddr':
                self.maddr = value
            elif name == 'method':
                self.method = value
            elif name == 'tag':
                self.tag = value
            elif name == 'lr':
                # RFC 3261 doesn't allow lr parameter to have a value,
                # but many stupid implementation do it anyway
                self.lr = True
            else:
                self.other.append(p)

    def __str__(self):
        return self.localStr()

    def localStr(self, local_addr = None, local_port = None):
        """Serialize to a URI string.

        *local_addr*/*local_port* substitute the host or port when those
        hold SipConf 'my'-placeholder objects (detected via a 'my'
        attribute on the value).
        """
        l = []; w = l.append
        w(self.scheme + ':')
        if self.username is not None:
            w(quote(self.username, USERNAME_SAFE))
            for v in self.userparams:
                w(';%s' % v)
            if self.password is not None:
                w(':%s' % self.password)
            w('@')
        if local_addr is not None and 'my' in dir(self.host):
            w(local_addr)
        else:
            w(str(self.host))
        if self.port is not None:
            if local_port is not None and 'my' in dir(self.port):
                w(':%d' % local_port)
            else:
                w(':%d' % self.port)
        for p in self.getParams():
            w(';%s' % p)
        if self.headers:
            w('?')
            w('&'.join([('%s=%s' % (h, quote(v))) for (h, v) in self.headers.items()]))
        return ''.join(l)

    def getParams(self):
        """Return the URI parameters as 'name=value' strings, in
        canonical order (user first, lr last)."""
        res = []; w = res.append
        if self.usertype is not None:
            w('user=%s' % self.usertype)
        for n in ('transport', 'ttl', 'maddr', 'method', 'tag'):
            v = getattr(self, n)
            if v is not None:
                w('%s=%s' % (n, v))
        for v in self.other:
            w(v)
        if self.lr:
            w('lr')
        return res

    def getCopy(self):
        """Return a copy; list attributes are cloned, but note the
        headers dict (if any) is shared with the original."""
        return SipURL(username = self.username, password = self.password, host = self.host, port = self.port, \
          headers = self.headers, usertype = self.usertype, transport = self.transport, ttl = self.ttl, \
          maddr = self.maddr, method = self.method, tag = self.tag, other = list(self.other), \
          userparams = list(self.userparams), lr = self.lr)

    def getHost(self):
        """Return the host part (IPv6 hosts keep their brackets)."""
        return self.host

    def getPort(self):
        """Return the explicit port, or the configured default SIP port."""
        if self.port is not None:
            return self.port
        else:
            return SipConf.default_port

    def getAddr(self):
        """Return (host, port), falling back to the default SIP port."""
        if self.port is not None:
            return (self.host, self.port)
        else:
            return (self.host, SipConf.default_port)

    def setAddr(self, addr):
        """Set host and port from a (host, port) tuple."""
        self.host, self.port = addr
if __name__ == '__main__':
    import sys
    # Smoke test: each entry pairs a URI with the parameter list that
    # getParams() is expected to report after parsing it.
    cases = (
        ('sip:user;par=u%40example.net@example.com', ()),
        ('sip:user@example.com?Route=%3Csip:example.com%3E', ()),
        ('sip:[2001:db8::10]', ()),
        ('sip:[2001:db8::10]:5070', ()),
        ('sip:user@example.net;tag=9817--94', ('tag=9817--94',)),
        ('sip:alice@atlanta.com;ttl=15;maddr=239.255.255.1', ('ttl=15', 'maddr=239.255.255.1')),
        ('sip:alice:secretword@atlanta.com;transport=tcp', ('transport=tcp',)),
        ('sip:alice@atlanta.com?subject=project%20x&priority=urgent', ()),
        ('sip:+1-212-555-1212:1234@gateway.com;user=phone', ('user=phone',)),
        ('sip:atlanta.com;method=REGISTER?to=alice%40atlanta.com', ('method=REGISTER',)),
        ('sip:alice;day=tuesday@atlanta.com', ()),
        ('sip:+611234567890@ims.mnc000.mcc000.3gppnetwork.org;user=phone;npdi', ('user=phone', 'npdi')),
        ('sip:1234#567890@example.com', ()),
        ('sip:foo@1.2.3.4:', ()),
        ('sip:foo@1.2.3.4:5060:5060', ()))
    for uri, expected_params in cases:
        parsed = SipURL(uri)
        actual_params = parsed.getParams()
        print(tuple(actual_params), expected_params, parsed.getHost(), parsed.getPort())
        # Complain (but keep going) when serialization is not a perfect
        # round trip of the input.
        reconstructed = str(parsed)
        if reconstructed != uri:
            sys.stderr.write('URI cannot be reconstructed precisely: expected \'%s\' got \'%s\'\n' % (uri, reconstructed))
| |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from unittest import mock
from absl.testing import absltest
import grpc
from grpc.framework.foundation import logging_pool
import portpicker
import tensorflow as tf
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.executors import eager_tf_executor
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_factory
from tensorflow_federated.python.core.impl.executors import executor_serialization
from tensorflow_federated.python.core.impl.executors import executor_service
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import placements
class TestEnv(object):
  """A test environment that consists of a single client and backend service."""

  def __init__(self,
               ex_factory: executor_factory.ExecutorFactory,
               num_clients: int = 0):
    """Starts an in-process gRPC ExecutorService plus a client stub for it.

    Args:
      ex_factory: Factory producing the executor the service delegates to.
      num_clients: Cardinality advertised for the CLIENTS placement.
    """
    port = portpicker.pick_unused_port()
    # Single-worker pool: the service handles one RPC at a time.
    self._server_pool = logging_pool.pool(max_workers=1)
    self._server = grpc.server(self._server_pool)
    self._server.add_insecure_port('[::]:{}'.format(port))
    self._service = executor_service.ExecutorService(ex_factory=ex_factory)
    executor_pb2_grpc.add_ExecutorServicer_to_server(self._service,
                                                     self._server)
    self._server.start()
    self._channel = grpc.insecure_channel('localhost:{}'.format(port))
    self._stub = executor_pb2_grpc.ExecutorStub(self._channel)
    # Seed the service with the CLIENTS cardinality before any test calls.
    serialized_cards = executor_serialization.serialize_cardinalities(
        {placements.CLIENTS: num_clients})
    self._stub.SetCardinalities(
        executor_pb2.SetCardinalitiesRequest(cardinalities=serialized_cards))

  def __del__(self):
    # Best-effort teardown; invocation timing is up to the GC.
    self._channel.close()
    self._server_pool.shutdown(wait=False)
    self._server.stop(None)

  @property
  def stub(self):
    # gRPC client stub connected to the service under test.
    return self._stub

  def get_value(self, value_id: str):
    """Retrieves a value using the `Compute` endpoint."""
    response = self._stub.Compute(
        executor_pb2.ComputeRequest(
            value_ref=executor_pb2.ValueRef(id=value_id)))
    py_typecheck.check_type(response, executor_pb2.ComputeResponse)
    value, _ = executor_serialization.deserialize_value(response.value)
    return value

  def get_value_future_directly(self, value_id: str):
    """Retrieves value by reaching inside the service object."""
    # Raises KeyError when the value is absent (e.g. after Dispose).
    with self._service._lock:
      return self._service._values[value_id]

  def close_channel(self):
    """Explicitly closes the client channel (normally done in __del__)."""
    self._channel.close()
class ExecutorServiceTest(absltest.TestCase):
  """End-to-end tests of ExecutorService over a local gRPC channel."""

  def test_executor_service_slowly_create_tensor_value(self):
    """CreateValue must return before the executor finishes; Compute waits."""

    class SlowExecutorValue(executor_value_base.ExecutorValue):
      # Minimal value wrapper whose compute() returns the stored payload.

      def __init__(self, v, t):
        self._v = v
        self._t = t

      @property
      def type_signature(self):
        return self._t

      async def compute(self):
        return self._v

    class SlowExecutor(executor_base.Executor):
      # Blocks inside create_value until `done` is set, so the test can
      # observe that the service handles CreateValue asynchronously.

      def __init__(self):
        self.status = 'idle'
        self.busy = threading.Event()
        self.done = threading.Event()

      async def create_value(self, value, type_spec=None):
        self.status = 'busy'
        self.busy.set()
        self.done.wait()
        self.status = 'done'
        return SlowExecutorValue(value, type_spec)

      async def create_call(self, comp, arg=None):
        raise NotImplementedError

      async def create_struct(self, elements):
        raise NotImplementedError

      async def create_selection(self, source, index):
        raise NotImplementedError

      def close(self):
        pass

    ex = SlowExecutor()
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(lambda _: ex)
    env = TestEnv(ex_factory)
    self.assertEqual(ex.status, 'idle')
    value_proto, _ = executor_serialization.serialize_value(10, tf.int32)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    # CreateValue has returned while the executor is still blocked.
    ex.busy.wait()
    self.assertEqual(ex.status, 'busy')
    ex.done.set()
    # Compute (via get_value) blocks until create_value completes.
    value = env.get_value(response.value_ref.id)
    self.assertEqual(ex.status, 'done')
    self.assertEqual(value, 10)

  def test_executor_service_create_tensor_value(self):
    """A tensor round-trips through CreateValue + Compute."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)
    value_proto, _ = executor_serialization.serialize_value(
        tf.constant(10.0).numpy(), tf.float32)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    value_id = str(response.value_ref.id)
    value = env.get_value(value_id)
    self.assertEqual(value, 10.0)
    # `del env` tears down the server/channel via TestEnv.__del__.
    del env

  def test_executor_service_create_no_arg_computation_value_and_call(self):
    """A no-arg tf_computation can be uploaded, called, and computed."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)

    @computations.tf_computation
    def comp():
      return tf.constant(10)

    value_proto, _ = executor_serialization.serialize_value(comp)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    response = env.stub.CreateCall(
        executor_pb2.CreateCallRequest(function_ref=response.value_ref))
    self.assertIsInstance(response, executor_pb2.CreateCallResponse)
    value_id = str(response.value_ref.id)
    value = env.get_value(value_id)
    self.assertEqual(value, 10)
    del env

  def test_executor_service_value_unavailable_after_dispose(self):
    """Dispose removes the value from the service's _values map."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)
    value_proto, _ = executor_serialization.serialize_value(
        tf.constant(10.0).numpy(), tf.float32)
    # Create the value
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    value_id = str(response.value_ref.id)
    # Check that the value appears in the _values map
    env.get_value_future_directly(value_id)
    # Dispose of the value
    dispose_request = executor_pb2.DisposeRequest()
    dispose_request.value_ref.append(response.value_ref)
    response = env.stub.Dispose(dispose_request)
    self.assertIsInstance(response, executor_pb2.DisposeResponse)
    # Check that the value is gone from the _values map
    # get_value_future_directly is used here so that we can catch the
    # exception rather than having it occur on the GRPC thread.
    with self.assertRaises(KeyError):
      env.get_value_future_directly(value_id)

  def test_dispose_does_not_trigger_cleanup(self):
    """Dispose must not tear down the executor stack itself."""

    class MockFactory(executor_factory.ExecutorFactory, mock.MagicMock):

      def create_executor(self, *args, **kwargs):
        return mock.MagicMock()

      def clean_up_executors(self):
        return

    ex_factory = MockFactory()
    ex_factory.clean_up_executors = mock.MagicMock()
    env = TestEnv(ex_factory)
    value_proto, _ = executor_serialization.serialize_value(
        tf.constant(10.0).numpy(), tf.float32)
    # Create the value
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    value_id = str(response.value_ref.id)
    # Check that the value appears in the _values map
    env.get_value_future_directly(value_id)
    # Dispose of the value
    dispose_request = executor_pb2.DisposeRequest()
    dispose_request.value_ref.append(response.value_ref)
    response = env.stub.Dispose(dispose_request)
    # We shouldn't be propagating close down the executor stack on Dispose--this
    # would close the bidi stream and cause a hang in the streaming case with
    # intermediate aggregation. Python GC takes care of pushing Dispose requests
    # from the aggregators to the workers.
    ex_factory.clean_up_executors.assert_not_called()

  def test_executor_service_create_one_arg_computation_value_and_call(self):
    """A one-arg tf_computation can be called with an uploaded argument."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)

    @computations.tf_computation(tf.int32)
    def comp(x):
      return tf.add(x, 1)

    value_proto, _ = executor_serialization.serialize_value(comp)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    comp_ref = response.value_ref
    value_proto, _ = executor_serialization.serialize_value(10, tf.int32)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    arg_ref = response.value_ref
    response = env.stub.CreateCall(
        executor_pb2.CreateCallRequest(
            function_ref=comp_ref, argument_ref=arg_ref))
    self.assertIsInstance(response, executor_pb2.CreateCallResponse)
    value_id = str(response.value_ref.id)
    value = env.get_value(value_id)
    self.assertEqual(value, 11)
    del env

  def test_executor_service_create_and_select_from_tuple(self):
    """CreateStruct and CreateSelection compose and select correctly."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)
    value_proto, _ = executor_serialization.serialize_value(10, tf.int32)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    ten_ref = response.value_ref
    self.assertEqual(env.get_value(ten_ref.id), 10)
    value_proto, _ = executor_serialization.serialize_value(20, tf.int32)
    response = env.stub.CreateValue(
        executor_pb2.CreateValueRequest(value=value_proto))
    self.assertIsInstance(response, executor_pb2.CreateValueResponse)
    twenty_ref = response.value_ref
    self.assertEqual(env.get_value(twenty_ref.id), 20)
    response = env.stub.CreateStruct(
        executor_pb2.CreateStructRequest(element=[
            executor_pb2.CreateStructRequest.Element(
                name='a', value_ref=ten_ref),
            executor_pb2.CreateStructRequest.Element(
                name='b', value_ref=twenty_ref)
        ]))
    self.assertIsInstance(response, executor_pb2.CreateStructResponse)
    tuple_ref = response.value_ref
    self.assertEqual(str(env.get_value(tuple_ref.id)), '<a=10,b=20>')
    for index, result_val in [(0, 10), (1, 20)]:
      response = env.stub.CreateSelection(
          executor_pb2.CreateSelectionRequest(
              source_ref=tuple_ref, index=index))
      self.assertIsInstance(response, executor_pb2.CreateSelectionResponse)
      selection_ref = response.value_ref
      self.assertEqual(env.get_value(selection_ref.id), result_val)
    del env

  @mock.patch.object(executor_stacks.ResourceManagingExecutorFactory,
                     'clean_up_executors')
  def test_clear_executor_calls_cleanup(self, mock_cleanup):
    """ClearExecutor, unlike Dispose, must tear the stack down."""
    ex_factory = executor_stacks.ResourceManagingExecutorFactory(
        lambda _: eager_tf_executor.EagerTFExecutor())
    env = TestEnv(ex_factory)
    env.stub.ClearExecutor(executor_pb2.ClearExecutorRequest())
    mock_cleanup.assert_called_once()
# Script entry point: run all test cases via absl's test runner.
if __name__ == '__main__':
  absltest.main()
| |
from PySide import QtGui, QtCore
from cdat.Base import BaseOkWindow
from cdat import axis_preview
from cdat.DictEdit import DictEditor
import vcs
class AxisEditorWidget(BaseOkWindow.BaseOkWindowWidget):
    def __init__(self, axis, parent=None):
        """Build the tick-editing UI for one plot axis.

        Args:
            axis: "x" or "y"; decides where the preview widget is placed
                (see setPreview) and which layout slot updateTickmark
                swaps widgets in.
            parent: accepted for Qt-style compatibility; not used here.

        NOTE(review): assumes the BaseOkWindowWidget base class provides
        self.vertical_layout and initializes self.preview -- confirm.
        """
        super(AxisEditorWidget, self).__init__()
        self.axis = axis
        # Tracks whether the user last adjusted by tick "count" or "step".
        self.state = None
        # create layout so you can set the preview
        self.horizontal_layout = QtGui.QHBoxLayout()
        # create labels
        tickmarks_label = QtGui.QLabel("Tickmarks:")
        negative_label = QtGui.QLabel("Negative:")
        ticks_label = QtGui.QLabel("Ticks:")
        step_label = QtGui.QLabel("Tick Step:")
        show_mini_label = QtGui.QLabel("Show Mini Ticks:")
        mini_per_tick_label = QtGui.QLabel("Mini-Ticks Per Tick:")
        preset_label = QtGui.QLabel("Preset:")
        # create rows
        tickmarks_row = QtGui.QHBoxLayout()
        preset_row = QtGui.QHBoxLayout()
        ticks_row = QtGui.QHBoxLayout()
        mini_ticks_row = QtGui.QHBoxLayout()
        # create widgets; ticks_widget/preset_widget wrap their rows so
        # updateTickmark can insert/remove them as single layout items
        self.ticks_widget = QtGui.QWidget()
        self.ticks_widget.setLayout(ticks_row)
        self.preset_widget = QtGui.QWidget()
        self.preset_widget.setLayout(preset_row)
        self.dict_widget = DictEditor.DictEditorWidget()
        self.dict_widget.dictEdited.connect(self.updateAxisWithDict)
        # set up scrollable for dict editor
        self.scroll_area = QtGui.QScrollArea()
        self.scroll_area.setWidget(self.dict_widget)
        self.scroll_area.setWidgetResizable(True)
        # Create radio buttons and group them
        self.tickmark_button_group = QtGui.QButtonGroup()
        tickmarks_row.addWidget(tickmarks_label)
        for name in ["Auto", "Even", "Manual"]:
            button = QtGui.QRadioButton(name)
            tickmarks_row.addWidget(button)
            if name == "Auto":
                # "Auto" mode is the initial selection.
                button.setChecked(True)
            self.tickmark_button_group.addButton(button)
        self.tickmark_button_group.buttonClicked.connect(self.updateTickmark)
        # create preset combo box
        # This is only being tracked for debugging
        preset_box = QtGui.QComboBox()
        preset_box.addItem("default")
        preset_box.addItems(vcs.listelements("list"))
        preset_box.currentIndexChanged[str].connect(self.updatePreset)
        preset_row.addWidget(preset_label)
        preset_row.addWidget(preset_box)
        # create slider for Ticks
        self.ticks_slider = QtGui.QSlider()
        self.ticks_slider.setRange(1, 100)
        self.ticks_slider.setOrientation(QtCore.Qt.Horizontal)
        self.ticks_slider.sliderMoved.connect(self.updateTicks)
        # create step edit box; numeric input only
        step_validator = QtGui.QDoubleValidator()
        self.step_edit = QtGui.QLineEdit()
        self.step_edit.setValidator(step_validator)
        # Debounce typing by one second; editingFinished still applies
        # the value immediately on focus-out/return.
        self.step_edit.textEdited.connect(lambda: QtCore.QTimer.singleShot(1000, self.updateStep))
        self.step_edit.editingFinished.connect(self.updateStep)
        # create negative check box
        self.negative_check = QtGui.QCheckBox()
        self.negative_check.clicked.connect(self.updateTickSign)
        ticks_row.addWidget(negative_label)
        ticks_row.addWidget(self.negative_check)
        ticks_row.addWidget(ticks_label)
        ticks_row.addWidget(self.ticks_slider)
        ticks_row.addWidget(step_label)
        ticks_row.addWidget(self.step_edit)
        # create show mini ticks check box
        show_mini_check_box = QtGui.QCheckBox()
        show_mini_check_box.stateChanged.connect(self.updateShowMiniTicks)
        # create mini tick spin box
        mini_tick_box = QtGui.QSpinBox()
        mini_tick_box.setRange(0, 255)
        mini_tick_box.valueChanged.connect(self.updateMiniTicks)
        mini_ticks_row.addWidget(show_mini_label)
        mini_ticks_row.addWidget(show_mini_check_box)
        mini_ticks_row.addWidget(mini_per_tick_label)
        mini_ticks_row.addWidget(mini_tick_box)
        # Assemble the control column; updateTickmark later swaps the
        # widget between rows 1/2 depending on the selected mode.
        self.adjuster_layout = QtGui.QVBoxLayout()
        self.adjuster_layout.insertLayout(0, tickmarks_row)
        self.adjuster_layout.insertWidget(1, self.preset_widget)
        self.adjuster_layout.insertLayout(2, mini_ticks_row)
        self.horizontal_layout.addLayout(self.adjuster_layout)
        self.vertical_layout.insertLayout(0, self.horizontal_layout)
        self.setPreview(axis_preview.AxisPreviewWidget())
def setPreview(self, preview):
if self.preview:
raise Exception("Preview already set")
self.preview = preview
self.preview.setMinimumWidth(150)
self.preview.setMaximumWidth(350)
if self.axis == "y":
self.horizontal_layout.insertWidget(0, self.preview)
elif self.axis == "x":
self.adjuster_layout.insertWidget(0, self.preview)
def setAxisObject(self, axis_obj):
self.object = axis_obj
self.preview.setAxisObject(self.object)
self.preview.update()
    # Update mode essentially
    def updateTickmark(self, button):
        """Switch the adjuster UI to match the selected tick-mode button.

        button.text() is one of "Auto", "Even" or "Manual"; the matching
        editor widget is swapped into the adjuster layout, and the axis
        object's mode is set to the lowercased button label.
        """
        # Slot of the mode-specific widget differs per axis — for "x" the
        # adjuster layout also carries the preview widget (see setPreview).
        if self.axis == "x":
            index = 2
        elif self.axis == "y":
            index = 1
        # NOTE(review): if self.axis is neither "x" nor "y", `index` is never
        # bound and the loop below raises NameError — confirm axis is always x/y.
        # Detach and hide whichever mode widget is currently installed.
        while self.adjuster_layout.count() > index + 1:
            widget = self.adjuster_layout.takeAt(index).widget()
            widget.setVisible(False)
        if button.text() == "Auto":
            self.adjuster_layout.insertWidget(index, self.preset_widget)
            self.preset_widget.setVisible(True)
        elif button.text() == "Even":
            self.adjuster_layout.insertWidget(index, self.ticks_widget)
            self.ticks_widget.setVisible(True)
            self.state = "count"
        elif button.text() == "Manual":
            # Manual editing uses the dict widget seeded from the axis object.
            self.adjuster_layout.insertWidget(index, self.scroll_area)
            self.dict_widget.setDict(self.object.ticks_as_dict())
            self.dict_widget.setVisible(True)
        self.object.mode = button.text().lower()
        self.preview.update()
def updatePreset(self, preset):
if preset == "default":
self.object.ticks = "*"
else:
self.object.ticks = preset
self.preview.update()
def updateShowMiniTicks(self, state):
self.object.show_miniticks = (state == QtCore.Qt.Checked)
self.preview.update()
def updateMiniTicks(self, mini_count):
self.object.minitick_count = int(mini_count)
self.preview.update()
# Update tick count
def updateTicks(self, value):
if self.negative_check.checkState() == QtCore.Qt.Checked:
self.object.numticks = -value
self.step_edit.setText(str(-self.object.step))
else:
self.object.numticks = value
self.step_edit.setText(str(self.object.step))
self.state = "count"
self.preview.update()
def updateStep(self):
try:
cur_val = float(self.step_edit.text())
except ValueError:
return
if cur_val < 0:
self.negative_check.setCheckState(QtCore.Qt.Checked)
else:
self.negative_check.setCheckState(QtCore.Qt.Unchecked)
self.object.step = cur_val
self.state = "step"
self.ticks_slider.setValue(self.object.numticks)
self.preview.update()
def updateAxisWithDict(self, dict):
float_dict = {float(key): value for key, value in dict.items()}
self.object.ticks = float_dict
self.preview.update()
def updateTickSign(self):
checked = self.negative_check.isChecked()
# probably a better way of doing this
if not self.object.numticks:
self.step_edit.setText(str(self.object.step))
self.state = "step"
val = float(self.step_edit.text())
if self.state == "count":
value = self.object.numticks
if checked:
self.object.numticks = -value
else:
self.object.numticks = value
if self.state == "step":
self.object.step = str(-val)
self.step_edit.setText(str(-val))
self.preview.update()
| |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
# Known-good release artifacts: sha256 checksum -> tarball file name.
# download_binary() verifies every fetched tarball against this table
# before extracting it, and rejects anything not listed here.
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir):
    """Context manager: chdir into *new_dir*, restoring the previous cwd on exit.

    The previous directory is restored even if the body raises.

    (The old `-> None` annotation was wrong: a @contextmanager-decorated
    generator yields a context manager, not None.)
    """
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def download_binary(tag, args) -> int:
    """Download, checksum-verify and extract the release binary for *tag*.

    Runs inside the target directory (caller uses pushd). Returns 0 on
    success, non-zero (curl/tar exit code or 1) on failure.
    """
    if Path(tag).is_dir():
        if not args.remove_dir:
            # Cached extraction from a previous run — nothing to do.
            print('Using cached {}'.format(tag))
            return 0
        shutil.rmtree(tag)
    Path(tag).mkdir()
    # Release binaries live under bin/bitcoin-core-<version> on bitcoincore.org;
    # release candidates are published under a test.rcN subdirectory instead.
    bin_path = 'bin/bitcoin-core-{}'.format(tag[1:])
    match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
    if match:
        bin_path = 'bin/bitcoin-core-{}/test.{}'.format(
            match.group(1), match.group(2))
    tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format(
        tag=tag[1:], platform=args.platform)
    tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format(
        bin_path=bin_path, tarball=tarball)
    print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
    # Probe with a HEAD request first so a missing tag fails fast.
    header, status = subprocess.Popen(
        ['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
    if re.search("404 Not Found", header.decode("utf-8")):
        print("Binary tag was not found")
        return 1
    curlCmds = [
        ['curl', '--remote-name', tarballUrl]
    ]
    for cmd in curlCmds:
        ret = subprocess.run(cmd).returncode
        if ret:
            return ret
    # Verify against the pinned SHA256_SUMS table before trusting the archive.
    hasher = hashlib.sha256()
    with open(tarball, "rb") as afile:
        hasher.update(afile.read())
    tarballHash = hasher.hexdigest()
    if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
        print("Checksum did not match")
        return 1
    print("Checksum matched")
    # Extract tarball
    ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
                          '--strip-components=1',
                          'bitcoin-{tag}'.format(tag=tag[1:])]).returncode
    if ret:
        return ret
    Path(tarball).unlink()
    return 0
def build_release(tag, args) -> int:
    """Clone and build the release *tag* from source (optionally via depends).

    Runs inside the target directory (caller uses pushd). Returns 0 on
    success, otherwise the first failing subcommand's exit code or 1.
    """
    githubUrl = "https://github.com/bitcoin/bitcoin"
    if args.remove_dir:
        if Path(tag).is_dir():
            shutil.rmtree(tag)
    if not Path(tag).is_dir():
        # fetch new tags
        # NOTE(review): `git fetch` runs in the *current* directory, so this
        # assumes the script itself is invoked from a bitcoin git checkout.
        subprocess.run(
            ["git", "fetch", githubUrl, "--tags"])
        output = subprocess.check_output(['git', 'tag', '-l', tag])
        if not output:
            print('Tag {} not found'.format(tag))
            return 1
    ret = subprocess.run([
        'git', 'clone', githubUrl, tag
    ]).returncode
    if ret:
        return ret
    with pushd(tag):
        ret = subprocess.run(['git', 'checkout', tag]).returncode
        if ret:
            return ret
        host = args.host
        if args.depends:
            with pushd('depends'):
                ret = subprocess.run(['make', 'NO_QT=1']).returncode
                if ret:
                    return ret
                # NOTE(review): os.environ.get evaluates its default eagerly,
                # so ./config.guess runs even when HOST is set; also its
                # check_output result is bytes, not str — confirm intended.
                host = os.environ.get(
                    'HOST', subprocess.check_output(['./config.guess']))
            # Point configure at the freshly built depends prefix.
            config_flags = '--prefix={pwd}/depends/{host} '.format(
                pwd=os.getcwd(),
                host=host) + args.config_flags
        cmds = [
            './autogen.sh',
            './configure {}'.format(config_flags),
            'make',
        ]
        for cmd in cmds:
            ret = subprocess.run(cmd.split()).returncode
            if ret:
                return ret
        # Move binaries, so they're in the same place as in the
        # release download
        Path('bin').mkdir(exist_ok=True)
        files = ['bitcoind', 'bitcoin-cli', 'bitcoin-tx']
        for f in files:
            Path('src/'+f).rename('bin/'+f)
    return 0
def check_host(args) -> int:
    """Resolve args.host ($HOST or depends/config.guess) and, for binary
    downloads, map the host triplet onto a release platform (args.platform).

    Returns:
        0 on success, 1 when no release binary matches the host triplet.
    """
    args.host = os.environ.get('HOST')
    if args.host is None:
        # BUG FIX: the old code passed the subprocess call as the eagerly
        # evaluated default of os.environ.get(), so ./depends/config.guess
        # ran (and could fail) even when HOST was already set.
        args.host = subprocess.check_output(
            './depends/config.guess').decode()
    if args.download_binary:
        platforms = {
            'x86_64-*-linux*': 'x86_64-linux-gnu',
            'x86_64-apple-darwin*': 'osx64',
        }
        args.platform = ''
        for pattern, target in platforms.items():
            if fnmatch(args.host, pattern):
                args.platform = target
        if not args.platform:
            print('Not sure which binary to download for {}'.format(args.host))
            return 1
    return 0
def main(args) -> int:
    """Entry point: prepare the target directory, then fetch or build each tag.

    Returns the first non-zero status from a sub-step, or 0 on success.
    """
    Path(args.target_dir).mkdir(exist_ok=True, parents=True)
    print("Releases directory: {}".format(args.target_dir))
    status = check_host(args)
    if status:
        return status
    if args.download_binary:
        action = download_binary
    else:
        # Building from source: extend CONFIG_FLAGS with the options used
        # for test releases (no GUI, no tests, no benchmarks).
        args.config_flags = os.environ.get('CONFIG_FLAGS', '')
        args.config_flags += ' --without-gui --disable-tests --disable-bench'
        action = build_release
    with pushd(args.target_dir):
        for tag in args.tags:
            status = action(tag, args)
            if status:
                return status
    return 0
# CLI entry point: parse arguments and exit with main()'s return code so the
# script's exit status mirrors the first failing download/build step.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r', '--remove-dir', action='store_true',
                        help='remove existing directory.')
    parser.add_argument('-d', '--depends', action='store_true',
                        help='use depends.')
    parser.add_argument('-b', '--download-binary', action='store_true',
                        help='download release binary.')
    parser.add_argument('-t', '--target-dir', action='store',
                        help='target directory.', default='releases')
    parser.add_argument('tags', nargs='+',
                        help="release tags. e.g.: v0.18.1 v0.20.0rc2")
    args = parser.parse_args()
    sys.exit(main(args))
| |
# Copyright(c) 2014, The scLVM developers (Forian Buettner, Paolo Francesco Casale, Oliver Stegle)
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import division, print_function, absolute_import
import sys
#import limix #make sure we use the right limix version
from utils.misc import dumpDictHdf5
from utils.misc import PCA
from utils.misc import warning_on_one_line
import limix_legacy
import limix_legacy.deprecated.modules.panama as PANAMA
import limix_legacy.deprecated.modules.varianceDecomposition as VAR
import limix_legacy.deprecated.modules.qtl as QTL
import scipy as SP
import scipy.linalg
import scipy.stats
import pdb
import h5py
import time
import copy
import warnings
import os
class scLVM(object):
	"""
	Single Cell Latent Variable Model module (scLVM)
	This class takes care of fitting and interpreting latent variable models to account for confounders in single-cell RNA-Seq data
	This module requires LIMIX
	"""
	def __init__(self,Y,geneID=None,tech_noise=None):
		"""
		Args:
			Y: gene expression matrix [N, G] (N cells x G genes)
			geneID: G vector of geneIDs
			tech_noise: G vector of tech_noise (per-gene technical variance)
		"""
		#store dimensions
		self.N = Y.shape[0]
		self.G = Y.shape[1]
		#set data
		self.Y = Y
		self.geneID = geneID
		# tech_noise is applied through set_tech_noise() so its shape is validated
		self.tech_noise = None
		self.var=None
		if tech_noise is not None:
			self.set_tech_noise(tech_noise)
	def fitGPLVM(self,idx=None,k=1,standardize=False,out_dir='./cache',file_name=None,recalc=False, use_ard=False, save_K=True):
		"""
		Fit a GPLVM (via limix PANAMA) on the genes in idx to learn hidden confounders.

		Args:
			idx: index of the genes involved
				(e.g., for capturing cell cycle, index of cell cycle genes)
			k: number of latent factors
			standardize: if True, rescale gene expression by std prior to fitting GPLVM
				(data are always mean-centered)
			out_dir: dir used to cache the results
			file_name: if not None, caches the results in the out_dir if the file does not exist
				if the file exists loads the results if recalc is True
			recalc: if True and cache file exists, rewrite cacheFile
			use_ard: use automatic relevance detection (switch off unimportant factors)
			save_K: if True, write X/Kconf/variances into the HDF5 cache file
		Returns:
			X: hidden variable
			Kconf: similarity matrix based on the confounding effect (XX.T)
			varGPLVM: variance contributions of latent factors and residual biological noise
		"""
		assert idx is not None, 'scLVM:: specify idx'
		if use_ard==True and k<2:
			# ARD only makes sense when there is more than one factor to prune
			warnings.formatwarning = warning_on_one_line
			warnings.warn('when using ARD consider choosing k>1')
		# NOTE(review): file_name=None makes os.path.join fail — despite the
		# default, a file_name appears to be required; confirm with callers.
		file_out = os.path.join(out_dir,file_name)
		if not os.path.exists(file_out) or recalc==True:
			# prepare data: restrict to the selected genes and mean-center
			Yconf = self.Y[:,idx]
			Yconf-= Yconf.mean(0)
			# fit gplvm
			panama = PANAMA.PANAMA(Y=Yconf,use_Kpop=False,standardize=standardize)
			panama.train(rank=k,LinearARD=use_ard)
			X = panama.get_Xpanama()
			Kconf = panama.get_Kpanama()
			var = panama.get_varianceComps()
			if use_ard==False:
				varGPLVM = {'K':var['Kpanama'],'noise':var['noise']}
			else:
				# with ARD, report the per-factor relevance weights instead
				varGPLVM = {'X_ARD':var['LinearARD'],'noise':var['noise']}
			# export results to the HDF5 cache
			if save_K==True:
				if not os.path.exists(out_dir):
					os.makedirs(out_dir)
				fout = h5py.File(file_out,'w')
				RV = {'X':X,'Kconf':Kconf}
				RV['cc_noise_filtered'] = idx
				dumpDictHdf5(RV,fout)
				dumpDictHdf5(varGPLVM,fout)
				fout.close()
		else:
			# load results from the file (cache hit and recalc==False)
			f = h5py.File(file_out,'r')
			X = f['X'][:]; Kconf = f['Kconf'][:]
			if use_ard==False:
				varGPLVM = {'K':f['K'][:],'noise':f['noise'][:]}
			else:
				varGPLVM = {'X_ARD':f['X_ARD'][:],'noise':f['noise'][:]}
			f.close()
		return X,Kconf,varGPLVM
	def set_tech_noise(self,tech_noise):
		"""
		Args:
			tech_noise: G vector of technical noise
		"""
		assert tech_noise.shape[0]==self.G, 'scLVM:: tech_noise dimension dismatch'
		self.tech_noise = tech_noise
	def varianceDecomposition(self,K=None,tech_noise=None,idx=None,i0=None,i1=None,max_iter=10,verbose=False):
		"""
		Decompose, per gene, expression variance into the supplied random effects,
		residual biological noise and (fixed) technical noise.

		Args:
			K: list of random effects to be considered in the analysis
			tech_noise: G vector of technical noise (overrides any previously set value)
			idx: indices of the genes to be considered in the analysis
			i0: gene index from which the anlysis starts
			i1: gene index to which the analysis stops
			max_iter: maximum number of random restarts
			verbose: if True, print progresses

		Results are cached on self.var / self.Ystar / self.var_info / self.Ystar_info.
		"""
		if tech_noise is not None: self.set_tech_noise(tech_noise)
		assert self.tech_noise is not None, 'scLVM:: specify technical noise'
		assert K is not None, 'scLVM:: specify K'
		if not isinstance(K, list):
			K = [K]
		for k in K:
			assert k.shape[0]==self.N, 'scLVM:: K dimension dismatch'
			assert k.shape[1]==self.N, 'scLVM:: K dimension dismatch'
		if idx is None:
			if i0 is None or i1 is None:
				# default: analyze all genes
				i0 = 0; i1 = self.G
			idx = SP.arange(i0,i1)
		elif not isinstance(idx, SP.ndarray):
			idx = SP.array([idx])
		_G = len(idx)
		# columns of var: one per random effect + biological noise + technical noise
		var = SP.zeros((_G,len(K)+2))
		_idx = SP.zeros(_G)
		geneID = SP.zeros(_G,dtype=str)
		conv = SP.zeros(_G)==1
		# per random effect, matrix of predictive means (used for correction later)
		Ystar = [SP.zeros((self.N,_G)) for i in range(len(K))]
		count = 0
		Ystd = self.Y-self.Y.mean(0) #delta optimization might be more efficient
		Ystd/= self.Y.std(0)
		# rescale technical noise to the standardized expression scale
		tech_noise = self.tech_noise/SP.array(self.Y.std(0))**2
		for ids in idx:
			if verbose:
				print('.. fitting gene %d'%ids)
			# extract a single gene
			y = Ystd[:,ids:ids+1]
			# build and fit variance decomposition model
			vc= VAR.VarianceDecomposition(y)
			vc.addFixedEffect()
			for k in K:
				vc.addRandomEffect(k)
			# two iid terms: residual biological noise and technical noise;
			# the technical-noise scale is fixed below via setParamMask
			vc.addRandomEffect(SP.eye(self.N))
			vc.addRandomEffect(SP.eye(self.N))
			vc.vd.getTerm(len(K)+1).getKcf().setParamMask(SP.zeros(1))
			# random restarts until the optimizer converges
			for iter_i in range(max_iter):
				scales0 = y.std()*SP.randn(len(K)+2)
				scales0[len(K)+1]=SP.sqrt(tech_noise[ids]);
				_conv = vc.optimize(scales0=scales0, n_times=2)
				if _conv: break
			conv[count] = _conv
			if self.geneID is not None: geneID[count] = self.geneID[ids]
			if not _conv:
				# fall back: everything not technical is labeled biological noise
				var[count,-2] = SP.maximum(0,y.var()-tech_noise[ids])
				var[count,-1] = tech_noise[ids]
				count+=1;
				continue
			_var = vc.getVarianceComps()[0,:]
			# predictive mean per random effect: var_i * K_i * Kinv * y
			KiY = vc.gp.agetKEffInvYCache().ravel()
			for ki in range(len(K)):
				Ystar[ki][:,count]=_var[ki]*SP.dot(K[ki],KiY)
			var[count,:] = _var
			count+=1;
		# col header
		col_header = ['hidden_%d'%i for i in range(len(K))]
		col_header.append('biol_noise')
		col_header.append('tech_noise')
		col_header = SP.array(col_header)
		# annotate column and rows of var and Ystar
		var_info = {'gene_idx':idx,'col_header':col_header,'conv':conv}
		# NOTE(review): geneID is a numpy array here, so `geneID is not None`
		# is always True — this was probably meant to test self.geneID; confirm.
		if geneID is not None: var_info['geneID'] = SP.array(geneID)
		Ystar_info = {'gene_idx':idx,'conv':conv}
		if geneID is not None: Ystar_info['geneID'] = SP.array(geneID)
		# cache stuff
		self.var = var
		self.Ystar = Ystar
		self.var_info = var_info
		self.Ystar_info = Ystar_info
	def getVarianceComponents(self,normalize=False):
		"""
		Returns:
			var: variance component matrix [G_0,m]
				(m = number of variance components=len(K)+2)
				(G0 = genes which were considered)
			normalize: if True, variance components are normalized to sum up to 1 (default False)
			var_info: dictionary annotating rows and columns of var
				gene_idx: index of the gene in the full matrix
				col_header: labels of variance components in the matrix
				conv: boolean vector marking genes for which variance decomposition has converged
				geneID: annotate rows of the variance component matrix
		"""
		assert self.var is not None, 'scLVM:: use varianceDecomposition method before'
		if normalize: var = self.var/self.var.sum(1)[:,SP.newaxis]
		else: var = self.var
		return var, self.var_info
	def getPredictions(self):
		"""
		Returns:
			Ystar: predictions [N,G_0]
				(G0 = genes which were considered in the analysis)
				Remark: if more than 1 random effect is considered Ystar is a list of prediction matrices, one per random effect
			Ystar_info: annotate rows and columns of Ystar
				gene_idx: index of the gene in the full matrix
				conv: boolean vector marking genes for which variance decomposition has converged
				geneID: annotate rows of the variance component matrix
		"""
		assert self.var is not None, 'scLVM:: use varianceDecomposition method before'
		# unwrap the single-effect case for convenience
		if len(self.Ystar)==1: Ystar = self.Ystar[0]
		else: Ystar = self.Ystar
		return Ystar, self.Ystar_info
	def getCorrectedExpression(self,rand_eff_ids=None):
		"""
		Args:
			rand_eff_ids: index of the random effects that are to consider for the correction
		Returns:
			Ycorr: corrected expression levels
		"""
		assert self.var is not None, 'scLVM:: use varianceDecomposition method before'
		# check rand_eff_ids: default is to correct for all fitted random effects
		if rand_eff_ids is None:
			rand_eff_ids = list(range(len(self.Ystar)))
		elif not isinstance(rand_eff_ids, list):
			rand_eff_ids = [rand_eff_ids]
		# loop on random effect to consider and correct
		#predicitive means were calculated for standardised expression
		Ystd = self.Y-self.Y.mean(0)
		Ystd/= self.Y.std(0)
		Ycorr = Ystd[:,self.Ystar_info['gene_idx']]#copy.copy(self.Y[:,self.Ystar_info['gene_idx']])
		for i in rand_eff_ids:
			# subtract the predictive mean of each unwanted random effect
			Ycorr -= self.Ystar[i]
		Ycorr*=self.Y[:,self.Ystar_info['gene_idx']].std(0) #bring back to original scale
		Ycorr+=self.Y[:,self.Ystar_info['gene_idx']].mean(0)
		return Ycorr
	def fitLMM(self,K=None,tech_noise=None,idx=None,i0=None,i1=None,verbose=False):
		"""
		Test gene-gene associations with a linear mixed model, optionally
		accounting for the fitted random effects.

		Args:
			K: list of random effects to be considered in the analysis
				if K is none, it does not consider any random effect
			tech_noise: (unused here; kept for API symmetry — set via varianceDecomposition)
			idx: indices of the genes to be considered in the analysis
			i0: gene index from which the anlysis starts
			i1: gene index to which the analysis stops
			verbose: if True, print progresses
		Returns:
			pv: matrix of pvalues
			beta: matrix of correlations
			info: dictionary annotates pv and beta rows and columns, containing
				gene_idx_row: index of the genes in rows
				conv: boolean vector marking genes for which variance decomposition has converged
				gene_row: annotate rows of matrices
		"""
		assert self.var is not None, 'scLVM:: when multiple hidden factors are considered, varianceDecomposition decomposition must be used prior to this method'
		# print QTL
		if idx is None:
			if i0 is None or i1 is None:
				i0 = 0
				i1 = self.G
			idx = SP.arange(i0,i1)
		elif not isinstance(idx, SP.ndarray):
			idx = SP.array([idx])
		if K is not None and not isinstance(K, list):
			K = [K]
		lmm_params = {'covs':SP.ones([self.N,1]),'NumIntervalsDeltaAlt':100,'NumIntervalsDelta0':100,'searchDelta':True}
		Ystd = self.Y-self.Y.mean(0)
		Ystd/= self.Y.std(0)
		beta = SP.zeros((idx.shape[0],self.G))
		pv = SP.zeros((idx.shape[0],self.G))
		geneID = SP.zeros(idx.shape[0],dtype=str)
		count = 0
		# normalized variance components weight the combined covariance below
		var = self.var/self.var.sum(1)[:,SP.newaxis]
		for ids in idx:
			if verbose:
				print('.. fitting gene %d'%ids)
			# extract a single gene
			if K is not None:
				if len(K)>1:
					if self.var_info['conv'][count]==True:
						# combine random effects weighted by their variance shares,
						# then rescale to unit mean diagonal
						_K = SP.sum([var[count,i]*K[i] for i in range(len(K))],0)
						_K/= _K.diagonal().mean()
					else:
						_K = None
				else:
					_K = K[0]
			else:
				_K = None
			lm = QTL.test_lmm(Ystd,Ystd[:,ids:ids+1],K=_K,verbose=False,**lmm_params)
			pv[count,:] = lm.getPv()[0,:]
			beta[count,:] = lm.getBetaSNP()[0,:]
			if self.geneID is not None: geneID[count] = self.geneID[ids]
			count+=1
		info = {'conv':self.var_info['conv'],'gene_idx_row':idx}
		# NOTE(review): geneID is a numpy array, so this check is always True —
		# probably intended to be `self.geneID is not None`; confirm.
		if geneID is not None: info['gene_row'] = geneID
		return pv, beta, info
| |
"""
Views for financialaid
"""
import json
from functools import reduce
from django.conf import settings
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.auth.models import User
from django.db.models import F, Q
from django.views.generic import ListView
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.exceptions import ValidationError
from rest_framework.generics import (
CreateAPIView,
get_object_or_404,
UpdateAPIView
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from rolepermissions.checkers import (
has_object_permission,
has_role,
)
from courses.models import Program
from dashboard.models import ProgramEnrollment
from dashboard.permissions import CanReadIfStaffOrSelf
from financialaid.api import (
get_formatted_course_price,
get_no_discount_tier_program,
)
from financialaid.constants import (
FinancialAidJustification,
FinancialAidStatus
)
from financialaid.models import (
FinancialAid,
TierProgram
)
from financialaid.permissions import (
UserCanEditFinancialAid,
FinancialAidUserMatchesLoggedInUser
)
from financialaid.serializers import (
FinancialAidActionSerializer,
FinancialAidRequestSerializer,
FinancialAidSerializer,
FormattedCoursePriceSerializer,
)
from mail.serializers import GenericMailSerializer
from micromasters.utils import now_in_utc
from roles.models import (
Instructor,
Staff,
)
from roles.roles import Permissions
class FinancialAidRequestView(CreateAPIView):
    """
    API endpoint for submitting a financial aid request.

    Accepts income, currency, and program; the serializer decides whether a
    manual review is required and otherwise assigns the appropriate pricing
    tier for personalized pricing.
    """
    serializer_class = FinancialAidRequestSerializer
    permission_classes = (IsAuthenticated, )
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )

    def get_queryset(self):  # pragma: no cover
        """
        Allows the DRF helper pages to load - not available in production
        """
        return None
class FinancialAidSkipView(UpdateAPIView):
    """
    View for financial aid skip API. Takes user and program, then determines whether a financial
    aid object exists, and then either creates or updates a financial aid object to reflect
    the user skipping financial aid.
    """
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )
    permission_classes = (IsAuthenticated, )

    def update(self, request, *args, **kwargs):
        """
        Handles PUT/PATCH in place of the default object update, since the
        FinancialAid record may not exist yet: the learner may skip financial
        aid either after starting the process or in lieu of applying.

        Raises:
            ValidationError: if the program has no financial aid, the user is
                not enrolled, or the application is already in a terminal state.
        """
        user = request.user
        program = get_object_or_404(Program, id=self.kwargs["program_id"])
        if not program.financial_aid_availability:
            raise ValidationError("Financial aid not available for this program.")
        if not ProgramEnrollment.objects.filter(program=program.id, user=user).exists():
            raise ValidationError("User not in program.")
        # RESET applications are treated as if they never existed.
        financialaid = FinancialAid.objects.filter(
            user=user,
            tier_program__program=program,
        ).exclude(status=FinancialAidStatus.RESET).first()
        if financialaid is None:
            # No application yet: create one in-memory; saved below via save_and_log.
            financialaid = FinancialAid(
                user=user,
                country_of_income=user.profile.country,
                date_exchange_rate=now_in_utc(),
                country_of_residence=user.profile.country,
            )
        if financialaid.status in FinancialAidStatus.TERMINAL_STATUSES:
            raise ValidationError("Financial aid application cannot be skipped once it's been approved or skipped.")
        # Skipping means full price: attach the no-discount tier.
        financialaid.tier_program = get_no_discount_tier_program(program.id)
        financialaid.status = FinancialAidStatus.SKIPPED
        financialaid.save_and_log(user)
        return Response(status=HTTP_200_OK)
class ReviewFinancialAidView(UserPassesTestMixin, ListView):
    """
    View for reviewing financial aid requests.
    Note: In the future, it may be worth factoring out the code for sorting into its own subclass of ListView
    """
    paginate_by = 50
    context_object_name = "financial_aid_objects"
    template_name = "review_financial_aid.html"
    # If user doesn't pass test_func, raises exception instead of redirecting to login url
    raise_exception = True
    # Used to modify queryset and in context (populated per-request in
    # test_func/get_queryset; class attributes act as defaults)
    search_query = None
    selected_status = None
    program = None
    course_price = None
    default_status = FinancialAidStatus.PENDING_MANUAL_APPROVAL
    # Used for sorting
    sort_field = None
    sort_direction = ""
    # Sortable columns and their human-readable labels for the template
    sort_fields = {
        "adjusted_cost": {
            "display": "Adjusted Cost"
        },
        "date_calculated": {
            "display": "Date Calculated"
        },
        "last_name": {
            "display": "Name/Location"
        },
        "reported_income": {
            "display": "Income/Yr."
        },
        "date_documents_sent": {
            "display": "Date Docs Sent"
        }
    }
    # Maps UI sort keys onto actual ORM field paths (keys absent here are
    # used verbatim, e.g. the adjusted_cost annotation)
    sort_field_mappings = {
        "date_calculated": "created_on",
        "last_name": "user__profile__last_name",
        "reported_income": "income_usd",
    }
    default_sort_field = "last_name"

    def test_func(self):
        """
        Validate user permissions (Analogous to permissions_classes for DRF)
        """
        # Also resolves self.program as a side effect; get_queryset relies on it.
        self.program = get_object_or_404(
            Program,
            id=self.kwargs["program_id"],  # pylint: disable=unsubscriptable-object
            live=True,
            financial_aid_availability=True
        )
        return has_object_permission(Permissions.CAN_EDIT_FINANCIAL_AID, self.request.user, self.program)

    def get_context_data(self, **kwargs):  # pylint: disable=arguments-differ
        """
        Gets context for view
        """
        context = super().get_context_data(**kwargs)
        # Constants required in view
        context["selected_status"] = self.selected_status
        context["statuses"] = FinancialAidStatus
        context["justifications"] = FinancialAidJustification.ALL_JUSTIFICATIONS
        context["email_serializer"] = GenericMailSerializer()
        context["current_sort_field"] = "{sort_direction}{sort_field}".format(
            sort_direction=self.sort_direction,
            sort_field=self.sort_field
        )
        context["current_program_id"] = self.program.id
        # Tiers with their discounted price, for the status-change dropdown
        context["tier_programs"] = TierProgram.objects.filter(
            program_id=context["current_program_id"],
            current=True
        ).order_by(
            "discount_amount"
        ).annotate(
            adjusted_cost=self.course_price - F("discount_amount")
        )
        context["search_query"] = self.search_query
        # Create ordered list of (financial aid status, financial message)
        message_order = (
            FinancialAidStatus.AUTO_APPROVED,
            FinancialAidStatus.PENDING_DOCS,
            FinancialAidStatus.DOCS_SENT,
            FinancialAidStatus.PENDING_MANUAL_APPROVAL,
            FinancialAidStatus.APPROVED,
            FinancialAidStatus.SKIPPED,
        )
        # NOTE(review): this is a generator, so the template can iterate it
        # only once — confirm the template does not loop over it twice.
        context["financial_aid_statuses"] = (
            (status, FinancialAidStatus.STATUS_MESSAGES_DICT[status])
            for status in message_order
        )
        # Get sort field information
        new_sort_direction = "" if self.sort_direction == "-" else "-"
        for field, field_dict in self.sort_fields.items():
            # For appending the sort_by get param on url
            field_dict["sort_field"] = "{sort_direction}{sort_field}".format(
                # If this field is our current sort field, we want to toggle the sort direction, else default ""
                sort_direction=new_sort_direction if field == self.sort_field else "",
                sort_field=field
            )
            # If this field is the current sort field, we want to indicate the current sort direction
            field_dict["direction_display"] = self.sort_direction if field == self.sort_field else None
        context["sort_fields"] = self.sort_fields
        # Required for styling
        js_settings = {
            "gaTrackingID": settings.GA_TRACKING_ID,
            "reactGaDebug": settings.REACT_GA_DEBUG,
            "authenticated": not self.request.user.is_anonymous,
            "edx_base_url": settings.EDXORG_BASE_URL,
            "mitxonline_base_url": settings.MITXONLINE_BASE_URL,
        }
        context["js_settings_json"] = json.dumps(js_settings)
        context["authenticated"] = not self.request.user.is_anonymous
        context["is_public"] = False
        context["has_zendesk_widget"] = True
        context["is_staff"] = has_role(self.request.user, [Staff.ROLE_ID, Instructor.ROLE_ID])
        return context

    def get_queryset(self):
        """
        Gets queryset for ListView to return to view
        """
        # Filter by program (self.program set in test_func())
        financial_aids = FinancialAid.objects.filter(
            tier_program__program=self.program
        )
        # Filter by status; unknown statuses fall back to the default
        self.selected_status = self.kwargs.get("status", None)
        if self.selected_status is None or self.selected_status not in FinancialAidStatus.ALL_STATUSES:
            self.selected_status = self.default_status
        financial_aids = financial_aids.filter(status=self.selected_status)
        # Filter by search query: OR together name/username/email matches
        # for every whitespace-separated term
        self.search_query = self.request.GET.get("search_query", "")
        search_query = reduce(
            lambda q, term: (
                q |
                Q(user__profile__first_name__icontains=term) |
                Q(user__profile__last_name__icontains=term) |
                Q(user__username__icontains=term) |
                Q(user__email__icontains=term)
            ),
            self.search_query.split(),
            Q()
        )
        if search_query:
            financial_aids = financial_aids.filter(search_query)
        # Annotate with adjusted cost (program price minus tier discount)
        self.course_price = self.program.price
        financial_aids = financial_aids.annotate(adjusted_cost=self.course_price - F("tier_program__discount_amount"))
        # Sort by field; a leading "-" in the GET param means descending
        self.sort_field = self.request.GET.get("sort_by", self.default_sort_field)
        if self.sort_field.startswith("-"):
            self.sort_field = self.sort_field[1:]
            # Defined above: self.sort_direction = ""
            self.sort_direction = "-"
        if self.sort_field not in self.sort_fields:
            self.sort_field = self.default_sort_field
            self.sort_direction = ""
        financial_aids = financial_aids.order_by(
            "{sort_direction}{sort_field}".format(
                sort_direction=self.sort_direction,
                sort_field=self.sort_field_mappings.get(self.sort_field, self.sort_field)
            )
        )
        return financial_aids
class FinancialAidActionView(UpdateAPIView):
    """
    Staff-facing endpoint for changing the status of a financial aid request.
    """
    queryset = FinancialAid.objects.all()
    serializer_class = FinancialAidActionSerializer
    permission_classes = (IsAuthenticated, UserCanEditFinancialAid)
    # Records are addressed by <financial_aid_id> in the URL, mapped to pk "id".
    lookup_field = "id"
    lookup_url_kwarg = "financial_aid_id"
class FinancialAidDetailView(UpdateAPIView):
    """
    Endpoint for a learner to update their own FinancialAid record.
    """
    queryset = FinancialAid.objects.all()
    serializer_class = FinancialAidSerializer
    permission_classes = (IsAuthenticated, FinancialAidUserMatchesLoggedInUser)
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )
    # Records are addressed by <financial_aid_id> in the URL, mapped to pk "id".
    lookup_field = "id"
    lookup_url_kwarg = "financial_aid_id"
class CoursePriceListView(APIView):
    """
    Lists the learner's course-run prices for every live program they are
    enrolled in.
    """
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )
    permission_classes = (IsAuthenticated, CanReadIfStaffOrSelf)

    def get(self, request, username, *args, **kwargs):  # pylint: disable=unused-argument
        """
        GET handler
        """
        learner = get_object_or_404(
            User,
            username=username,
        )
        enrollments = (
            ProgramEnrollment.objects
            .select_related('user', 'program')
            .filter(user=learner, program__live=True).all()
        )
        prices = [
            get_formatted_course_price(enrollment)
            for enrollment in enrollments
        ]
        serialized = FormattedCoursePriceSerializer(prices, many=True)
        return Response(data=serialized.data)
class CoursePriceDetailView(APIView):
    """
    View for retrieving a learner's price for a course run
    """
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = (IsAuthenticated, )

    def get(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """
        GET handler

        Looks up the requesting user's enrollment in the program named by
        the ``program_id`` URL kwarg (404 if absent or not live) and
        returns the formatted price for it.
        """
        enrollment = get_object_or_404(
            ProgramEnrollment,
            user=request.user,
            program__id=self.kwargs["program_id"],
            program__live=True,
        )
        serializer = FormattedCoursePriceSerializer(
            get_formatted_course_price(enrollment)
        )
        return Response(data=serializer.data)
| |
from __future__ import print_function, division
from sympy.core.singleton import S
from sympy.core.function import Function
from sympy.core import Add
from sympy.core.evalf import get_integer_part, PrecisionExhausted
from sympy.core.numbers import Integer
from sympy.core.relational import Gt, Lt, Ge, Le
from sympy.core.symbol import Symbol
###############################################################################
######################### FLOOR and CEILING FUNCTIONS #########################
###############################################################################
class RoundFunction(Function):
    """The base class for rounding functions (floor, ceiling).

    Subclasses provide ``_eval_number`` (rounding of a plain Number or
    NumberSymbol) and ``_dir`` (-1 for floor, +1 for ceiling), used when
    evaluating the numeric part via ``get_integer_part``.
    """

    @classmethod
    def eval(cls, arg):
        from sympy import im
        # Integers are fixed points of any rounding function.
        if arg.is_integer:
            return arg
        # Purely imaginary argument: round the imaginary part and
        # re-attach I, i.e. cls(a*I) -> cls(a)*I.
        if arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
            i = im(arg)
            if not i.has(S.ImaginaryUnit):
                return cls(i)*S.ImaginaryUnit
            return cls(arg, evaluate=False)
        # Plain numbers / number symbols are handled by the subclass.
        v = cls._eval_number(arg)
        if v is not None:
            return v

        # Integral, numerical, symbolic part
        ipart = npart = spart = S.Zero

        # Extract integral (or complex integral) terms
        terms = Add.make_args(arg)

        for t in terms:
            if t.is_integer or (t.is_imaginary and im(t).is_integer):
                ipart += t
            elif t.has(Symbol):
                spart += t
            else:
                npart += t

        if not (npart or spart):
            return ipart

        # Evaluate npart numerically if independent of spart
        # (real/imaginary parts must not mix between npart and spart,
        # otherwise the rounding direction of the sum cannot be decided
        # from npart alone).
        if npart and (
            not spart or
            npart.is_real and (spart.is_imaginary or (S.ImaginaryUnit*spart).is_real) or
                npart.is_imaginary and spart.is_real):
            try:
                r, i = get_integer_part(
                    npart, cls._dir, {}, return_ints=True)
                ipart += Integer(r) + Integer(i)*S.ImaginaryUnit
                npart = S.Zero
            except (PrecisionExhausted, NotImplementedError):
                # Precision was not sufficient to decide the integer part;
                # fold npart back in with the symbolic remainder below.
                pass

        spart += npart
        if not spart:
            return ipart
        elif spart.is_imaginary or (S.ImaginaryUnit*spart).is_real:
            # Imaginary remainder: round its imaginary part separately.
            return ipart + cls(im(spart), evaluate=False)*S.ImaginaryUnit
        else:
            return ipart + cls(spart, evaluate=False)

    def _eval_is_finite(self):
        return self.args[0].is_finite

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_is_integer(self):
        # Rounding a real argument always yields an integer.
        return self.args[0].is_real
class floor(RoundFunction):
    """
    Floor is a univariate function which returns the largest integer
    value not greater than its argument. However this implementation
    generalizes floor to complex numbers.

    More information can be found in "Concrete mathematics" by Graham,
    pp. 87 or visit http://mathworld.wolfram.com/FloorFunction.html.

    >>> from sympy import floor, E, I, Float, Rational
    >>> floor(17)
    17
    >>> floor(Rational(23, 10))
    2
    >>> floor(2*E)
    5
    >>> floor(-Float(0.567))
    -1
    >>> floor(-I/2)
    -I

    See Also
    ========

    ceiling
    """
    # Rounding direction used by RoundFunction.eval / get_integer_part.
    _dir = -1

    @classmethod
    def _eval_number(cls, arg):
        """Floor of an explicit Number or NumberSymbol, else None."""
        if arg.is_Number:
            if arg.is_Rational:
                # Python's // floors toward -oo, matching floor().
                return Integer(arg.p // arg.q)
            elif arg.is_Float:
                return Integer(int(arg.floor()))
            else:
                # oo, -oo, nan: floor leaves them unchanged.
                return arg
        if arg.is_NumberSymbol:
            # Lower end of the bracketing integer interval, e.g. E in (2, 3).
            return arg.approximation_interval(Integer)[0]

    def _eval_nseries(self, x, n, logx):
        # Series of floor(f(x)) about x=0: constant, except that
        # approaching an integer from below yields the next lower value.
        r = self.subs(x, 0)
        args = self.args[0]
        args0 = args.subs(x, 0)
        if args0 == r:
            direction = (args - args0).leadterm(x)[0]
            if direction.is_positive:
                return r
            else:
                return r - 1
        else:
            return r

    def __le__(self, other):
        # floor(x) <= x is always true for real x.
        if self.args[0] == other and other.is_real:
            return S.true
        return Le(self, other, evaluate=False)

    def __gt__(self, other):
        # floor(x) > x is always false for real x.
        if self.args[0] == other and other.is_real:
            return S.false
        return Gt(self, other, evaluate=False)
class ceiling(RoundFunction):
    """
    Ceiling is a univariate function which returns the smallest integer
    value not less than its argument. Ceiling function is generalized
    in this implementation to complex numbers.

    More information can be found in "Concrete mathematics" by Graham,
    pp. 87 or visit http://mathworld.wolfram.com/CeilingFunction.html.

    >>> from sympy import ceiling, E, I, Float, Rational
    >>> ceiling(17)
    17
    >>> ceiling(Rational(23, 10))
    3
    >>> ceiling(2*E)
    6
    >>> ceiling(-Float(0.567))
    0
    >>> ceiling(I/2)
    I

    See Also
    ========

    floor
    """
    # Rounding direction used by RoundFunction.eval / get_integer_part.
    _dir = 1

    @classmethod
    def _eval_number(cls, arg):
        """Ceiling of an explicit Number or NumberSymbol, else None."""
        if arg.is_Number:
            if arg.is_Rational:
                # ceil(p/q) == -floor(-p/q), using Python's flooring //.
                return -Integer(-arg.p // arg.q)
            elif arg.is_Float:
                return Integer(int(arg.ceiling()))
            else:
                # oo, -oo, nan: ceiling leaves them unchanged.
                return arg
        if arg.is_NumberSymbol:
            # Upper end of the bracketing integer interval, e.g. E in (2, 3).
            return arg.approximation_interval(Integer)[1]

    def _eval_nseries(self, x, n, logx):
        # Series of ceiling(f(x)) about x=0: constant, except that
        # approaching an integer from above yields the next higher value.
        r = self.subs(x, 0)
        args = self.args[0]
        args0 = args.subs(x, 0)
        if args0 == r:
            direction = (args - args0).leadterm(x)[0]
            if direction.is_positive:
                return r + 1
            else:
                return r
        else:
            return r

    def __lt__(self, other):
        # ceiling(x) < x is always false for real x.
        if self.args[0] == other and other.is_real:
            return S.false
        return Lt(self, other, evaluate=False)

    def __ge__(self, other):
        # ceiling(x) >= x is always true for real x.
        if self.args[0] == other and other.is_real:
            return S.true
        return Ge(self, other, evaluate=False)
| |
# pylint: disable=C0302,R0204
'''
RESTful API for MyTardis models and data.
Implemented with Tastypie.
.. moduleauthor:: Grischa Meyer <grischa@gmail.com>
'''
import json
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse, HttpResponseForbidden, \
StreamingHttpResponse
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authentication import SessionAuthentication
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.constants import ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound
from tastypie.exceptions import Unauthorized
from tastypie.http import HttpUnauthorized
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.utils import trailing_slash
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tardis.tardis_portal import tasks
from tardis.tardis_portal.auth.decorators import \
get_accessible_datafiles_for_user
from tardis.tardis_portal.auth.decorators import has_datafile_access
from tardis.tardis_portal.auth.decorators import has_datafile_download_access
from tardis.tardis_portal.auth.decorators import has_dataset_access
from tardis.tardis_portal.auth.decorators import has_dataset_write
from tardis.tardis_portal.auth.decorators import has_delete_permissions
from tardis.tardis_portal.auth.decorators import has_experiment_access
from tardis.tardis_portal.auth.decorators import has_write_permissions
from tardis.tardis_portal.auth.localdb_auth import django_user
from tardis.tardis_portal.models import ObjectACL
from tardis.tardis_portal.models.datafile import DataFile, compute_checksums
from tardis.tardis_portal.models.datafile import DataFileObject
from tardis.tardis_portal.models.dataset import Dataset
from tardis.tardis_portal.models.experiment import Experiment
from tardis.tardis_portal.models.parameters import DatafileParameter
from tardis.tardis_portal.models.parameters import DatafileParameterSet
from tardis.tardis_portal.models.parameters import DatasetParameter
from tardis.tardis_portal.models.parameters import DatasetParameterSet
from tardis.tardis_portal.models.parameters import ExperimentParameter
from tardis.tardis_portal.models.parameters import ExperimentParameterSet
from tardis.tardis_portal.models.parameters import ParameterName
from tardis.tardis_portal.models.parameters import Schema
from tardis.tardis_portal.models.storage import StorageBox
from tardis.tardis_portal.models.storage import StorageBoxOption
from tardis.tardis_portal.models.storage import StorageBoxAttribute
from tardis.tardis_portal.models.facility import Facility
from tardis.tardis_portal.models.facility import facilities_managed_by
from tardis.tardis_portal.models.instrument import Instrument
class PrettyJSONSerializer(Serializer):
    '''Serializer that renders indented, key-sorted JSON with a trailing
    newline, for human-friendly API responses.
    '''
    # Spaces per indentation level in the rendered JSON.
    json_indent = 2

    def to_json(self, data, options=None):
        '''Serialize ``data`` to pretty-printed JSON text.'''
        simple = self.to_simple(data, options or {})
        rendered = json.dumps(
            simple,
            cls=json.JSONEncoder,
            sort_keys=True,
            ensure_ascii=False,
            indent=self.json_indent,
        )
        return rendered + "\n"
# Pretty-print API responses while developing; use tastypie's compact
# default serializer in production.
default_serializer = PrettyJSONSerializer() if settings.DEBUG else Serializer()
class MyTardisAuthentication(object):
    '''
    custom tastypie authentication that works with both anonymous use and
    a number of available auth mechanisms.
    '''
    def is_authenticated(self, request, **kwargs):  # noqa # too complex
        '''
        handles backends explicitly so that it can return False when
        credentials are given but wrong and return Anonymous User when
        credentials are not given or the session has expired (web use).
        '''
        auth_info = request.META.get('HTTP_AUTHORIZATION')

        # No Authorization header: try the Django session, falling back to
        # an anonymous user so public data stays readable.
        if 'HTTP_AUTHORIZATION' not in request.META:
            # Preserve token-auth middleware state across the user swap
            # below.
            # NOTE(review): ``tokens`` is only bound when the attribute
            # exists, but is assigned back unconditionally further down —
            # looks like an UnboundLocalError hazard when the attribute is
            # absent; confirm with the token auth middleware.
            if hasattr(request.user, 'allowed_tokens'):
                tokens = request.user.allowed_tokens
            session_auth = SessionAuthentication()
            check = session_auth.is_authenticated(request, **kwargs)
            if check:
                if isinstance(check, HttpUnauthorized):
                    session_auth_result = False
                else:
                    # Remember which backend succeeded for get_identifier().
                    request._authentication_backend = session_auth
                    session_auth_result = check
            else:
                # Expired/absent session: degrade to anonymous access.
                request.user = AnonymousUser()
                session_auth_result = True
            request.user.allowed_tokens = tokens
            return session_auth_result
        else:
            # Explicit credentials given: dispatch on the auth scheme and
            # return False (not anonymous) when they are wrong.
            if auth_info.startswith('Basic'):
                basic_auth = BasicAuthentication()
                check = basic_auth.is_authenticated(request, **kwargs)
                if check:
                    if isinstance(check, HttpUnauthorized):
                        return False
                    else:
                        request._authentication_backend = basic_auth
                        return check
            if auth_info.startswith('ApiKey'):
                apikey_auth = ApiKeyAuthentication()
                check = apikey_auth.is_authenticated(request, **kwargs)
                if check:
                    if isinstance(check, HttpUnauthorized):
                        return False
                    else:
                        request._authentication_backend = apikey_auth
                        return check
            # Unrecognised scheme falls through, returning None (falsy).

    def get_identifier(self, request):
        '''Identifier of the backend that authenticated this request,
        or 'nouser' when no backend succeeded.'''
        try:
            return request._authentication_backend.get_identifier(request)
        except AttributeError:
            return 'nouser'
# Shared module-level authentication instance used by all resources below.
default_authentication = MyTardisAuthentication()
class ACLAuthorization(Authorization):
    '''Authorisation class for Tastypie.

    read_list filters the candidate object_list down to what the
    requesting user may see; read/create/update *_detail methods return a
    boolean; deletes are refused except for experiment owners.  Branches
    dispatch on the concrete model type of ``bundle.obj``.
    '''
    def read_list(self, object_list, bundle):  # noqa # too complex
        obj_ids = [obj.id for obj in object_list]
        # Superusers see everything.
        if bundle.request.user.is_authenticated() and \
           bundle.request.user.is_superuser:
            return object_list
        if isinstance(bundle.obj, Experiment):
            # Experiment.safe honours the per-user ObjectACLs.
            experiments = Experiment.safe.all(bundle.request.user)
            return experiments.filter(id__in=obj_ids)
        elif isinstance(bundle.obj, ExperimentParameterSet):
            experiments = Experiment.safe.all(bundle.request.user)
            return ExperimentParameterSet.objects.filter(
                experiment__in=experiments, id__in=obj_ids)
        elif isinstance(bundle.obj, ExperimentParameter):
            experiments = Experiment.safe.all(bundle.request.user)
            return ExperimentParameter.objects.filter(
                parameterset__experiment__in=experiments,
                id__in=obj_ids
            )
        elif isinstance(bundle.obj, Dataset):
            return [ds for ds in object_list
                    if has_dataset_access(bundle.request, ds.id)]
        elif isinstance(bundle.obj, DatasetParameterSet):
            return [dps for dps in object_list
                    if has_dataset_access(bundle.request, dps.dataset.id)]
        elif isinstance(bundle.obj, DatasetParameter):
            return [dp for dp in object_list
                    if has_dataset_access(bundle.request,
                                          dp.parameterset.dataset.id)]
        elif isinstance(bundle.obj, DataFile):
            all_files = get_accessible_datafiles_for_user(bundle.request)
            return all_files.filter(id__in=obj_ids)
        elif isinstance(bundle.obj, DatafileParameterSet):
            datafiles = get_accessible_datafiles_for_user(bundle.request)
            return DatafileParameterSet.objects.filter(
                datafile__in=datafiles, id__in=obj_ids
            )
        elif isinstance(bundle.obj, DatafileParameter):
            datafiles = get_accessible_datafiles_for_user(bundle.request)
            return DatafileParameter.objects.filter(
                parameterset__datafile__in=datafiles, id__in=obj_ids)
        elif isinstance(bundle.obj, Schema):
            # Schemas and parameter names are public metadata.
            return object_list
        elif isinstance(bundle.obj, ParameterName):
            return object_list
        elif isinstance(bundle.obj, ObjectACL):
            # Only ACLs attached to experiments the user can see.
            experiment_ids = Experiment.safe.all(
                bundle.request.user).values_list('id', flat=True)
            return ObjectACL.objects.filter(
                content_type__model='experiment',
                object_id__in=experiment_ids,
                id__in=obj_ids
            )
        elif bundle.request.user.is_authenticated() and \
                isinstance(bundle.obj, User):
            # Facility managers may list all users; everyone else sees
            # themselves plus users with public experiments.
            if len(facilities_managed_by(bundle.request.user)) > 0:
                return object_list
            else:
                return [user for user in object_list if
                        (user == bundle.request.user or
                         user.experiment_set.filter(public_access__gt=1)
                         .count() > 0)]
        elif isinstance(bundle.obj, Group):
            if facilities_managed_by(bundle.request.user).count() > 0:
                return object_list
            else:
                return bundle.request.user.groups.filter(id__in=obj_ids)
        elif isinstance(bundle.obj, Facility):
            facilities = facilities_managed_by(bundle.request.user)
            return [facility for facility in object_list
                    if facility in facilities]
        elif isinstance(bundle.obj, Instrument):
            facilities = facilities_managed_by(bundle.request.user)
            instruments = Instrument.objects.filter(facility__in=facilities)
            return [instrument for instrument in object_list
                    if instrument in instruments]
        elif isinstance(bundle.obj, StorageBox):
            return object_list
        elif isinstance(bundle.obj, StorageBoxOption):
            # Only options whitelisted as client-visible.
            return [option for option in object_list
                    if option.key in StorageBoxOptionResource.accessible_keys]
        elif isinstance(bundle.obj, StorageBoxAttribute):
            return object_list
        else:
            # Unknown types are hidden rather than erroring out.
            return []

    def read_detail(self, object_list, bundle):  # noqa # too complex
        '''Boolean gate for single-object reads.'''
        if bundle.request.user.is_authenticated() and \
           bundle.request.user.is_superuser:
            return True
        if isinstance(bundle.obj, Experiment):
            return has_experiment_access(bundle.request, bundle.obj.id)
        elif isinstance(bundle.obj, ExperimentParameterSet):
            return has_experiment_access(
                bundle.request, bundle.obj.experiment.id)
        elif isinstance(bundle.obj, ExperimentParameter):
            return has_experiment_access(
                bundle.request, bundle.obj.parameterset.experiment.id)
        elif isinstance(bundle.obj, Dataset):
            return has_dataset_access(bundle.request, bundle.obj.id)
        elif isinstance(bundle.obj, DatasetParameterSet):
            return has_dataset_access(bundle.request, bundle.obj.dataset.id)
        elif isinstance(bundle.obj, DatasetParameter):
            return has_dataset_access(
                bundle.request, bundle.obj.parameterset.dataset.id)
        elif isinstance(bundle.obj, DataFile):
            return has_datafile_access(bundle.request, bundle.obj.id)
        elif isinstance(bundle.obj, DatafileParameterSet):
            return has_datafile_access(
                bundle.request, bundle.obj.datafile.id)
        elif isinstance(bundle.obj, DatafileParameter):
            return has_datafile_access(
                bundle.request, bundle.obj.parameterset.datafile.id)
        elif isinstance(bundle.obj, User):
            # allow all authenticated users to read public user info
            # the dehydrate function also adds/removes some information
            authenticated = bundle.request.user.is_authenticated()
            public_user = bundle.obj.experiment_set.filter(
                public_access__gt=1).count() > 0
            return public_user or authenticated
        elif isinstance(bundle.obj, Schema):
            return True
        elif isinstance(bundle.obj, ParameterName):
            return True
        elif isinstance(bundle.obj, StorageBox):
            return bundle.request.user.is_authenticated()
        elif isinstance(bundle.obj, StorageBoxOption):
            return bundle.request.user.is_authenticated() and \
                bundle.obj.key in StorageBoxOptionResource.accessible_keys
        elif isinstance(bundle.obj, StorageBoxAttribute):
            return bundle.request.user.is_authenticated()
        elif isinstance(bundle.obj, Group):
            return bundle.obj in bundle.request.user.groups.all()
        elif isinstance(bundle.obj, Facility):
            return bundle.obj in facilities_managed_by(bundle.request.user)
        elif isinstance(bundle.obj, Instrument):
            facilities = facilities_managed_by(bundle.request.user)
            return bundle.obj.facility in facilities
        # Fail loudly for resource types without an explicit policy.
        raise NotImplementedError(type(bundle.obj))

    def create_list(self, object_list, bundle):
        # Bulk creation is not supported through the API.
        raise NotImplementedError(type(bundle.obj))

    def create_detail(self, object_list, bundle):  # noqa # too complex
        '''Boolean gate for single-object creation; combines Django model
        permissions with per-object write ACLs.'''
        if not bundle.request.user.is_authenticated():
            return False
        if bundle.request.user.is_authenticated() and \
           bundle.request.user.is_superuser:
            return True
        if isinstance(bundle.obj, Experiment):
            return bundle.request.user.has_perm('tardis_portal.add_experiment')
        elif isinstance(bundle.obj, ExperimentParameterSet):
            if not bundle.request.user.has_perm(
                    'tardis_portal.change_experiment'):
                return False
            # The target experiment may come in as a URI or already be
            # attached to the (unsaved) object.
            experiment_uri = bundle.data.get('experiment', None)
            if experiment_uri is not None:
                experiment = ExperimentResource.get_via_uri(
                    ExperimentResource(), experiment_uri, bundle.request)
                return has_write_permissions(bundle.request, experiment.id)
            elif getattr(bundle.obj.experiment, 'id', False):
                return has_write_permissions(bundle.request,
                                             bundle.obj.experiment.id)
            return False
        elif isinstance(bundle.obj, ExperimentParameter):
            return bundle.request.user.has_perm(
                'tardis_portal.change_experiment') and \
                has_write_permissions(bundle.request,
                                      bundle.obj.parameterset.experiment.id)
        elif isinstance(bundle.obj, Dataset):
            if not bundle.request.user.has_perm(
                    'tardis_portal.change_dataset'):
                return False
            # Write access is required on *every* linked experiment.
            perm = False
            for exp_uri in bundle.data.get('experiments', []):
                try:
                    this_exp = ExperimentResource.get_via_uri(
                        ExperimentResource(), exp_uri, bundle.request)
                except:
                    # NOTE(review): bare except treats any resolution
                    # failure as "no permission".
                    return False
                if has_write_permissions(bundle.request, this_exp.id):
                    perm = True
                else:
                    return False
            return perm
        elif isinstance(bundle.obj, DatasetParameterSet):
            if not bundle.request.user.has_perm(
                    'tardis_portal.change_dataset'):
                return False
            dataset_uri = bundle.data.get('dataset', None)
            if dataset_uri is not None:
                dataset = DatasetResource.get_via_uri(
                    DatasetResource(), dataset_uri, bundle.request)
                return has_dataset_write(bundle.request, dataset.id)
            elif getattr(bundle.obj.dataset, 'id', False):
                return has_dataset_write(bundle.request,
                                         bundle.obj.dataset.id)
            return False
        elif isinstance(bundle.obj, DatasetParameter):
            return bundle.request.user.has_perm(
                'tardis_portal.change_dataset') and \
                has_dataset_write(bundle.request,
                                  bundle.obj.parameterset.dataset.id)
        elif isinstance(bundle.obj, DataFile):
            dataset = DatasetResource.get_via_uri(DatasetResource(),
                                                  bundle.data['dataset'],
                                                  bundle.request)
            return all([
                bundle.request.user.has_perm('tardis_portal.change_dataset'),
                bundle.request.user.has_perm('tardis_portal.add_datafile'),
                has_dataset_write(bundle.request, dataset.id),
            ])
        elif isinstance(bundle.obj, DatafileParameterSet):
            dataset = Dataset.objects.get(
                pk=bundle.obj.datafile.dataset.id)
            return all([
                bundle.request.user.has_perm('tardis_portal.change_dataset'),
                bundle.request.user.has_perm('tardis_portal.add_datafile'),
                has_dataset_write(bundle.request, dataset.id),
            ])
        elif isinstance(bundle.obj, DatafileParameter):
            dataset = Dataset.objects.get(
                pk=bundle.obj.parameterset.datafile.dataset.id)
            return all([
                bundle.request.user.has_perm('tardis_portal.change_dataset'),
                bundle.request.user.has_perm('tardis_portal.add_datafile'),
                has_dataset_write(bundle.request, dataset.id),
            ])
        elif isinstance(bundle.obj, DataFileObject):
            return all([
                bundle.request.user.has_perm('tardis_portal.change_dataset'),
                bundle.request.user.has_perm('tardis_portal.add_datafile'),
                has_dataset_write(bundle.request,
                                  bundle.obj.datafile.dataset.id),
            ])
        elif isinstance(bundle.obj, ObjectACL):
            return bundle.request.user.has_perm('tardis_portal.add_objectacl')
        elif isinstance(bundle.obj, Group):
            return bundle.request.user.has_perm('tardis_portal.add_group')
        elif isinstance(bundle.obj, Facility):
            return bundle.request.user.has_perm('tardis_portal.add_facility')
        elif isinstance(bundle.obj, Instrument):
            facilities = facilities_managed_by(bundle.request.user)
            return all([
                bundle.request.user.has_perm('tardis_portal.add_instrument'),
                bundle.obj.facility in facilities
            ])
        raise NotImplementedError(type(bundle.obj))

    def update_list(self, object_list, bundle):
        # Bulk update is not supported through the API.
        raise NotImplementedError(type(bundle.obj))
        # allowed = []

        # # Since they may not all be saved, iterate over them.
        # for obj in object_list:
        #     if obj.user == bundle.request.user:
        #         allowed.append(obj)

        # return allowed

    def update_detail(self, object_list, bundle):  # noqa # too complex
        '''Latest TastyPie requires update_detail permissions to be able to
        create objects.  Rather than duplicating code here, we return True
        and permissions are checked in create_detail.
        '''
        if not bundle.request.user.is_authenticated():
            return False
        if isinstance(bundle.obj, Experiment):
            return bundle.request.user.has_perm(
                'tardis_portal.change_experiment') and \
                has_write_permissions(bundle.request, bundle.obj.id)
        elif isinstance(bundle.obj, ExperimentParameterSet):
            return bundle.request.user.has_perm(
                'tardis_portal.change_experiment')  # and \
            # has_write_permissions(bundle.request, bundle.obj.experiment.id)
        elif isinstance(bundle.obj, ExperimentParameter):
            return bundle.request.user.has_perm(
                'tardis_portal.change_experiment')
        elif isinstance(bundle.obj, Dataset):
            return False
        elif isinstance(bundle.obj, DatasetParameterSet):
            return False
        elif isinstance(bundle.obj, DatasetParameter):
            return False
        elif isinstance(bundle.obj, DataFile):
            return bundle.request.user.has_perm('tardis_portal.change_datafile')
        elif isinstance(bundle.obj, DatafileParameterSet):
            return False
        elif isinstance(bundle.obj, DatafileParameter):
            return False
        elif isinstance(bundle.obj, Schema):
            return False
        elif isinstance(bundle.obj, Group):
            return False
        elif isinstance(bundle.obj, Facility):
            return False
        elif isinstance(bundle.obj, Instrument):
            facilities = facilities_managed_by(bundle.request.user)
            return bundle.obj.facility in facilities and \
                bundle.request.user.has_perm('tardis_portal.change_instrument')
        raise NotImplementedError(type(bundle.obj))

    def delete_list(self, object_list, bundle):
        raise Unauthorized("Sorry, no deletes.")

    def delete_detail(self, object_list, bundle):
        # Only experiments can be deleted, and only by users holding both
        # the model permission and the per-object delete ACL.
        if isinstance(bundle.obj, Experiment):
            return bundle.request.user.has_perm(
                'tardis_portal.change_experiment') and \
                has_delete_permissions(bundle.request, bundle.obj.id)
        raise Unauthorized("Sorry, no deletes.")
def lookup_by_unique_id_only(resource):
    '''
    Build a ``lookup_kwargs_with_identifiers`` override for *resource*.

    The returned function matches existing objects only when an ``id`` or
    ``pk`` is supplied; any other identifier combination is answered with
    a lookup that can never match, forcing creation of a new instance.
    '''
    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        has_unique_id = 'id' in kwargs or 'pk' in kwargs
        if not has_unique_id:
            # No primary key given, so a new instance is required; an id
            # of -1 will never match an existing resource.
            return {'id': -1}
        return super(resource, self).lookup_kwargs_with_identifiers(
            bundle, kwargs)
    return lookup_kwargs_with_identifiers
class GroupResource(ModelResource):
    # Lookup of Django auth groups; ACLAuthorization.read_list restricts
    # non-facility-managers to their own groups.
    class Meta:
        queryset = Group.objects.all()
        authentication = default_authentication
        authorization = ACLAuthorization()
        filtering = {
            'id': ('exact',),
            'name': ('exact',),
        }
class UserResource(ModelResource):
    # Read-only user info; visibility of username/email is trimmed in
    # dehydrate() below.
    groups = fields.ManyToManyField(GroupResource, 'groups',
                                    null=True, full=True)

    class Meta:
        authentication = default_authentication
        authorization = ACLAuthorization()
        queryset = User.objects.all()
        allowed_methods = ['get']
        fields = ['username', 'first_name', 'last_name', 'email']
        serializer = default_serializer
        filtering = {
            'username': ('exact', ),
            'email': ('iexact', ),
        }

    def dehydrate(self, bundle):
        '''
        use cases:
        public user:
          anonymous:
            name, uri, email, id
          authenticated:
            other user:
              name, uri, email, id [, username if facility manager]
            same user:
              name, uri, email, id, username
        private user:
          anonymous:
            none
          authenticated:
            other user:
              name, uri, id [, username, email if facility manager]
            same user:
              name, uri, email, id, username
        '''
        authuser = bundle.request.user
        authenticated = authuser.is_authenticated()
        queried_user = bundle.obj
        # "public" means the user owns at least one publicly visible
        # experiment (public_access above the private threshold).
        public_user = queried_user.experiment_set.filter(
            public_access__gt=1).count() > 0
        same_user = authuser == queried_user

        # add the database id for convenience
        bundle.data['id'] = queried_user.id

        # allow the user to find out their username and email
        # allow facility managers to query other users' username and email
        if authenticated and \
                (same_user or facilities_managed_by(authuser).count() > 0):
            bundle.data['username'] = queried_user.username
            bundle.data['email'] = queried_user.email
        else:
            del(bundle.data['username'])
            del(bundle.data['email'])

        # add public information
        if public_user:
            bundle.data['email'] = queried_user.email

        return bundle
class MyTardisModelResource(ModelResource):
    '''Common base resource wiring shared authentication, authorization
    and serialization, and restricting lookups to id/pk.'''

    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        # Delegate to the shared id/pk-only lookup (see
        # lookup_by_unique_id_only above).
        return lookup_by_unique_id_only(MyTardisModelResource)(
            self, bundle, kwargs)

    def patch_list(self, request, **kwargs):
        # NOTE(review): pass-through override — presumably kept as an
        # extension hook; behaviourally identical to the parent method.
        return super(MyTardisModelResource, self).patch_list(request, **kwargs)

    class Meta:
        authentication = default_authentication
        authorization = ACLAuthorization()
        serializer = default_serializer
class SchemaResource(MyTardisModelResource):
    # Schemas are world-readable (see ACLAuthorization.read_list).

    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        # Schemas may only be matched by id/pk, never by other fields.
        return lookup_by_unique_id_only(SchemaResource)(self, bundle, kwargs)

    class Meta(MyTardisModelResource.Meta):
        queryset = Schema.objects.all()
        filtering = {
            'id': ('exact', ),
            'namespace': ('exact', ),
        }
class ParameterNameResource(MyTardisModelResource):
    # Parameter definitions, always attached to a schema.
    schema = fields.ForeignKey(SchemaResource, 'schema')

    class Meta(MyTardisModelResource.Meta):
        queryset = ParameterName.objects.all()
        filtering = {
            'schema': ALL_WITH_RELATIONS,
        }
class ParameterResource(MyTardisModelResource):
    '''Base resource for individual parameters (experiment/dataset/datafile).'''
    name = fields.ForeignKey(ParameterNameResource, 'name')
    value = fields.CharField(blank=True)

    def hydrate(self, bundle):
        '''
        sets the parametername by uri or name
        if untyped value is given, set value via parameter method,
        otherwise use modelresource automatisms
        '''
        try:
            # 'name' may be a ParameterName resource URI...
            parname = ParameterNameResource.get_via_uri(
                ParameterNameResource(),
                bundle.data['name'], bundle.request)
        except NotFound:
            # ...or a bare name, resolved (or created) via the parent
            # parameter set.
            parname = bundle.related_obj._get_create_parname(
                bundle.data['name'])
        del(bundle.data['name'])
        bundle.obj.name = parname
        if 'value' in bundle.data:
            # Untyped value: let the model coerce it to the right column.
            bundle.obj.set_value(bundle.data['value'])
            del(bundle.data['value'])
        return bundle
class ParameterSetResource(MyTardisModelResource):
    '''Base resource for parameter sets; resolves the incoming ``schema``
    field to a Schema instance.'''
    schema = fields.ForeignKey(SchemaResource, 'schema', full=True)

    def hydrate_schema(self, bundle):
        '''
        Resolve ``bundle.data['schema']`` to a Schema object.

        Accepts either a SchemaResource URI or a plain schema namespace
        string.  Raises Schema.DoesNotExist when the namespace lookup
        fails (the previous inner try/except only re-raised that
        exception unchanged, so it has been removed).
        '''
        try:
            schema = SchemaResource.get_via_uri(SchemaResource(),
                                                bundle.data['schema'],
                                                bundle.request)
        except NotFound:
            # Fall back to a namespace lookup; a miss propagates
            # Schema.DoesNotExist to the caller.
            schema = Schema.objects.get(namespace=bundle.data['schema'])
        bundle.obj.schema = schema
        del(bundle.data['schema'])
        return bundle
class ExperimentParameterSetResource(ParameterSetResource):
    '''API for ExperimentParameterSets
    '''
    # String paths avoid import-order problems with ExperimentResource,
    # which is defined later in this module.
    experiment = fields.ForeignKey(
        'tardis.tardis_portal.api.ExperimentResource', 'experiment')
    parameters = fields.ToManyField(
        'tardis.tardis_portal.api.ExperimentParameterResource',
        'experimentparameter_set',
        related_name='parameterset', full=True, null=True)

    class Meta(ParameterSetResource.Meta):
        queryset = ExperimentParameterSet.objects.all()
class ExperimentParameterResource(ParameterResource):
    # Single parameter within an ExperimentParameterSet.
    parameterset = fields.ForeignKey(ExperimentParameterSetResource,
                                     'parameterset')

    class Meta(ParameterResource.Meta):
        queryset = ExperimentParameter.objects.all()
class ExperimentResource(MyTardisModelResource):
    '''API for Experiments
    also creates a default ACL and allows ExperimentParameterSets to be read
    and written.

    TODO: catch duplicate schema submissions for parameter sets
    '''
    created_by = fields.ForeignKey(UserResource, 'created_by')
    parameter_sets = fields.ToManyField(
        ExperimentParameterSetResource,
        'experimentparameterset_set',
        related_name='experiment',
        full=True, null=True)

    class Meta(MyTardisModelResource.Meta):
        queryset = Experiment.objects.all()
        filtering = {
            'id': ('exact', ),
            'title': ('exact',),
        }
        always_return_data = True

    def dehydrate(self, bundle):
        '''Attach derived data: authors, licence details and owner ids.'''
        exp = bundle.obj
        authors = [{'name': a.author, 'url': a.url}
                   for a in exp.experimentauthor_set.all()]
        bundle.data['authors'] = authors
        lic = exp.license
        if lic is not None:
            bundle.data['license'] = {
                'name': lic.name,
                'url': lic.url,
                'description': lic.internal_description,
                'image_url': lic.image_url,
                'allows_distribution': lic.allows_distribution,
            }
        owners = exp.get_owners()
        bundle.data['owner_ids'] = [o.id for o in owners]
        return bundle

    def hydrate_m2m(self, bundle):
        '''
        create ACL before any related objects are created in order to use
        ACL permissions for those objects.
        '''
        if getattr(bundle.obj, 'id', False):
            experiment = bundle.obj
            # TODO: unify this with the view function's ACL creation,
            # maybe through an ACL toolbox.
            # The creating user becomes the owner with full rights.
            acl = ObjectACL(content_type=experiment.get_ct(),
                            object_id=experiment.id,
                            pluginId=django_user,
                            entityId=str(bundle.request.user.id),
                            canRead=True,
                            canWrite=True,
                            canDelete=True,
                            isOwner=True,
                            aclOwnershipType=ObjectACL.OWNER_OWNED)
            acl.save()
        return super(ExperimentResource, self).hydrate_m2m(bundle)

    def obj_create(self, bundle, **kwargs):
        '''experiments need at least one ACL to be available through the
        ExperimentManager (Experiment.safe)
        Currently not tested for failed db transactions as sqlite does not
        enforce limits.
        '''
        user = bundle.request.user
        bundle.data['created_by'] = user
        bundle = super(ExperimentResource, self).obj_create(bundle, **kwargs)
        return bundle

    def obj_get_list(self, bundle, **kwargs):
        '''
        responds to EPN query for Australian Synchrotron
        '''
        if hasattr(bundle.request, 'GET') and 'EPN' in bundle.request.GET:
            epn = bundle.request.GET['EPN']
            exp_schema = Schema.objects.get(
                namespace='http://www.tardis.edu.au'
                          '/schemas/as/experiment/2010/09/21')
            epn_pn = ParameterName.objects.get(schema=exp_schema, name='EPN')
            # NOTE(review): .get() raises DoesNotExist for an unknown EPN
            # and experiment[0] raises IndexError on an empty queryset —
            # looks like unknown EPNs surface as server errors; confirm
            # intended behaviour.
            parameter = ExperimentParameter.objects.get(name=epn_pn,
                                                        string_value=epn)
            experiment_id = parameter.parameterset.experiment.id
            experiment = Experiment.objects.filter(pk=experiment_id)
            # Only return the match if the user is allowed to see it.
            if experiment[0] in Experiment.safe.all(bundle.request.user):
                return experiment
        return super(ExperimentResource, self).obj_get_list(bundle,
                                                            **kwargs)
class DatasetParameterSetResource(ParameterSetResource):
    # Parameter sets attached to a Dataset; string paths avoid forward
    # reference problems with resources defined later in this module.
    dataset = fields.ForeignKey(
        'tardis.tardis_portal.api.DatasetResource', 'dataset')
    parameters = fields.ToManyField(
        'tardis.tardis_portal.api.DatasetParameterResource',
        'datasetparameter_set',
        related_name='parameterset', full=True, null=True)

    class Meta(ParameterSetResource.Meta):
        queryset = DatasetParameterSet.objects.all()
class DatasetParameterResource(ParameterResource):
    # Single parameter within a DatasetParameterSet.
    parameterset = fields.ForeignKey(DatasetParameterSetResource,
                                     'parameterset')

    class Meta(ParameterResource.Meta):
        queryset = DatasetParameter.objects.all()
class StorageBoxResource(MyTardisModelResource):
    # Options are filtered through a callable attribute so only keys
    # whitelisted in StorageBoxOptionResource.accessible_keys are exposed.
    options = fields.ToManyField(
        'tardis.tardis_portal.api.StorageBoxOptionResource',
        attribute=lambda bundle: StorageBoxOption.objects
        .filter(storage_box=bundle.obj,
                key__in=StorageBoxOptionResource.accessible_keys),
        related_name='storage_box',
        full=True, null=True)
    attributes = fields.ToManyField(
        'tardis.tardis_portal.api.StorageBoxAttributeResource',
        'attributes',
        related_name='storage_box',
        full=True, null=True)

    class Meta(MyTardisModelResource.Meta):
        queryset = StorageBox.objects.all()
class StorageBoxOptionResource(MyTardisModelResource):
    # Whitelist of option keys exposed through the API (also consulted by
    # ACLAuthorization and StorageBoxResource).
    accessible_keys = ['location']
    storage_box = fields.ForeignKey(
        'tardis.tardis_portal.api.StorageBoxResource',
        'storage_box',
        related_name='options',
        full=False)

    class Meta(MyTardisModelResource.Meta):
        queryset = StorageBoxOption.objects.all()
class StorageBoxAttributeResource(MyTardisModelResource):
    # Free-form attributes attached to a storage box.
    storage_box = fields.ForeignKey(
        'tardis.tardis_portal.api.StorageBoxResource',
        'storage_box',
        related_name='attributes',
        full=False)

    class Meta(MyTardisModelResource.Meta):
        queryset = StorageBoxAttribute.objects.all()
class FacilityResource(MyTardisModelResource):
    # Facilities are visible only to their managers
    # (see ACLAuthorization.read_list/read_detail).
    manager_group = fields.ForeignKey(GroupResource, 'manager_group',
                                      null=True, full=True)

    class Meta(MyTardisModelResource.Meta):
        queryset = Facility.objects.all()
        filtering = {
            'id': ('exact', ),
            'manager_group': ALL_WITH_RELATIONS,
            'name': ('exact', ),
        }
        always_return_data = True
class InstrumentResource(MyTardisModelResource):
    # Instruments belong to a facility; visibility follows facility
    # management (see ACLAuthorization).
    facility = fields.ForeignKey(FacilityResource, 'facility',
                                 null=True, full=True)

    class Meta(MyTardisModelResource.Meta):
        queryset = Instrument.objects.all()
        filtering = {
            'id': ('exact', ),
            'facility': ALL_WITH_RELATIONS,
            'name': ('exact', ),
        }
        always_return_data = True
class DatasetResource(MyTardisModelResource):
    """API resource for Datasets.

    Exposes nested parameter sets and the instrument, plus an extra endpoint
    .../dataset/<pk>/files/[<sub-path>] that lists the dataset's files.
    """
    experiments = fields.ToManyField(
        ExperimentResource, 'experiments', related_name='datasets')
    parameter_sets = fields.ToManyField(
        DatasetParameterSetResource,
        'datasetparameterset_set',
        related_name='dataset',
        full=True, null=True)
    instrument = fields.ForeignKey(
        InstrumentResource,
        'instrument',
        null=True,
        full=True)

    class Meta(MyTardisModelResource.Meta):
        queryset = Dataset.objects.all()
        filtering = {
            'id': ('exact', ),
            'experiments': ALL_WITH_RELATIONS,
            'description': ('exact', ),
            'directory': ('exact', ),
        }
        always_return_data = True

    def prepend_urls(self):
        # Extra route: list a dataset's files, optionally restricted to a
        # sub-directory given as the (optional) trailing path component.
        return [
            url(r'^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/files/'
                r'(?:(?P<file_path>.+))?$' % self._meta.resource_name,
                self.wrap_view('get_datafiles'),
                name='api_get_datafiles_for_dataset'),
        ]

    def get_datafiles(self, request, **kwargs):
        """Dispatch to the DataFile list view filtered to this dataset.

        When ``file_path`` was captured from the URL it is applied as a
        ``directory__startswith`` filter.
        """
        file_path = kwargs.get('file_path', None)
        dataset_id = kwargs['pk']
        datafiles = DataFile.objects.filter(dataset__id=dataset_id)
        auth_bundle = self.build_bundle(request=request)
        auth_bundle.obj = DataFile()
        self.authorized_read_list(
            datafiles, auth_bundle
        )
        del kwargs['pk']
        # BUG FIX: Django omits unmatched optional named groups from the view
        # kwargs, so a bare `del kwargs['file_path']` raised KeyError when the
        # URL had no trailing path; pop() tolerates the missing key.
        kwargs.pop('file_path', None)
        kwargs['dataset__id'] = dataset_id
        if file_path is not None:
            kwargs['directory__startswith'] = file_path
        df_res = DataFileResource()
        return df_res.dispatch('list', request, **kwargs)
class DataFileResource(MyTardisModelResource):
    """API resource for DataFile records.

    Supports JSON and multipart uploads, per-file download/verify endpoints,
    and nested parameter sets and replicas (DataFileObjects).
    """
    dataset = fields.ForeignKey(DatasetResource, 'dataset')
    parameter_sets = fields.ToManyField(
        'tardis.tardis_portal.api.DatafileParameterSetResource',
        'datafileparameterset_set',
        related_name='datafile',
        full=True, null=True)
    datafile = fields.FileField()
    replicas = fields.ToManyField(
        'tardis.tardis_portal.api.ReplicaResource',
        'file_objects',
        related_name='datafile', full=True, null=True)
    # NOTE(review): class-level attribute used to pass the upload path from
    # obj_create() to post_list(); shared across requests in one process, so
    # presumably not safe under concurrent POSTs — confirm intended.
    temp_url = None

    class Meta(MyTardisModelResource.Meta):
        queryset = DataFile.objects.all()
        filtering = {
            'directory': ('exact', 'startswith'),
            'dataset': ALL_WITH_RELATIONS,
            'filename': ('exact', ),
        }
        resource_name = 'dataset_file'

    def download_file(self, request, **kwargs):
        '''
        curl needs the -J switch to get the filename right
        auth needs to be added manually here
        '''
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Per-datafile access check in addition to the resource-level one.
        if not has_datafile_download_access(
                request=request, datafile_id=kwargs['pk']):
            return HttpResponseForbidden()
        file_record = self._meta.queryset.get(pk=kwargs['pk'])
        self.authorized_read_detail(
            [file_record],
            self.build_bundle(obj=file_record, request=request))
        # Stream the file back rather than loading it into memory.
        file_object = file_record.get_file()
        wrapper = FileWrapper(file_object)
        response = StreamingHttpResponse(
            wrapper, content_type=file_record.mimetype)
        response['Content-Length'] = file_record.size
        response['Content-Disposition'] = 'attachment; filename="%s"' % \
            file_record.filename
        self.log_throttled_access(request)
        return response

    def verify_file(self, request, **kwargs):
        '''triggers verification of file, e.g. after non-POST upload complete
        '''
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        if not has_datafile_download_access(
                request=request, datafile_id=kwargs['pk']):
            return HttpResponseForbidden()
        file_record = self._meta.queryset.get(pk=kwargs['pk'])
        self.authorized_read_detail(
            [file_record],
            self.build_bundle(obj=file_record, request=request))
        # Queue an async verification task for every stored copy of the file.
        for dfo in file_record.file_objects.all():
            tasks.dfo_verify.delay(dfo.id)
        return HttpResponse()

    def hydrate(self, bundle):
        # Turn a POSTed file into replica data, computing checksums when the
        # client did not supply any.
        if 'attached_file' in bundle.data:
            # have POSTed file
            newfile = bundle.data['attached_file'][0]
            if 'md5sum' not in bundle.data and 'sha512sum' not in bundle.data:
                # close_file=False: the file object is still needed below.
                checksums = compute_checksums(newfile, close_file=False)
                bundle.data['md5sum'] = checksums['md5sum']
                bundle.data['sha512sum'] = checksums['sha512sum']
            if 'replicas' in bundle.data:
                for replica in bundle.data['replicas']:
                    replica.update({'file_object': newfile})
            else:
                bundle.data['replicas'] = [{'file_object': newfile}]
            del(bundle.data['attached_file'])
        return bundle

    def obj_create(self, bundle, **kwargs):
        """Create the DataFile; when no replica was supplied, also create a
        DataFileObject in the receiving storage box and remember its upload
        path for post_list()."""
        retval = super(DataFileResource, self).obj_create(bundle, **kwargs)
        if 'replicas' not in bundle.data or not bundle.data['replicas']:
            # no replica specified: return upload path and create dfo for
            # new path
            sbox = bundle.obj.get_receiving_storage_box()
            if sbox is None:
                raise NotImplementedError
            dfo = DataFileObject(
                datafile=bundle.obj,
                storage_box=sbox)
            dfo.create_set_uri()
            dfo.save()
            self.temp_url = dfo.get_full_path()
        return retval

    def post_list(self, request, **kwargs):
        # Replace the response body with the upload path set by obj_create(),
        # if any, so clients know where to put the file contents.
        response = super(DataFileResource, self).post_list(request,
                                                           **kwargs)
        if self.temp_url is not None:
            response.content = self.temp_url
            self.temp_url = None
        return response

    def prepend_urls(self):
        # Per-file download and verification endpoints.
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('download_file'), name="api_download_file"),
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/verify%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('verify_file'), name="api_verify_file"),
        ]

    def deserialize(self, request, data, format=None):
        '''
        from https://github.com/toastdriven/django-tastypie/issues/42
        modified to deserialize json sent via POST. Would fail if data is sent
        in a different format.
        uses a hack to get back pure json from request.POST
        '''
        if not format:
            format = request.META.get('CONTENT_TYPE', 'application/json')
        if format == 'application/x-www-form-urlencoded':
            return request.POST
        if format.startswith('multipart'):
            # Multipart upload: the JSON metadata travels in the 'json_data'
            # form field, the file(s) in request.FILES.
            jsondata = request.POST['json_data']
            data = super(DataFileResource, self).deserialize(
                request, jsondata, format='application/json')
            data.update(request.FILES)
            return data
        return super(DataFileResource, self).deserialize(request,
                                                         data, format)

    def put_detail(self, request, **kwargs):
        '''
        from https://github.com/toastdriven/django-tastypie/issues/42
        '''
        # Pre-set _body so tastypie does not try to re-read the multipart
        # stream (already consumed by Django's POST/FILES parsing).
        if request.META.get('CONTENT_TYPE').startswith('multipart') and \
                not hasattr(request, '_body'):
            request._body = ''
        return super(DataFileResource, self).put_detail(request, **kwargs)
class DatafileParameterSetResource(ParameterSetResource):
    """API resource for DatafileParameterSet, with its parameters inlined."""
    datafile = fields.ForeignKey(
        DataFileResource, 'datafile')
    parameters = fields.ToManyField(
        'tardis.tardis_portal.api.DatafileParameterResource',
        'datafileparameter_set',
        related_name='parameterset', full=True, null=True)

    class Meta(ParameterSetResource.Meta):
        queryset = DatafileParameterSet.objects.all()
class DatafileParameterResource(ParameterResource):
    """API resource for DatafileParameter records, linked to their owning
    parameter set."""
    parameterset = fields.ForeignKey(DatafileParameterSetResource,
                                     'parameterset')

    class Meta(ParameterResource.Meta):
        queryset = DatafileParameter.objects.all()
class LocationResource(MyTardisModelResource):
    """Legacy 'location' API resource; locations are now StorageBoxes."""
    class Meta(MyTardisModelResource.Meta):
        queryset = StorageBox.objects.all()
class ReplicaResource(MyTardisModelResource):
    """API resource for DataFileObject (historically 'Replica'): one stored
    copy of a DataFile in a particular storage box."""
    datafile = fields.ForeignKey(DataFileResource, 'datafile')

    class Meta(MyTardisModelResource.Meta):
        queryset = DataFileObject.objects.all()
        filtering = {
            'verified': ('exact',),
            'url': ('exact', 'startswith'),
        }

    def hydrate(self, bundle):
        # Map legacy 'url'/'location' input onto uri/storage_box, attach the
        # parent datafile and (optionally) the uploaded file object.
        if 'url' in bundle.data:
            if 'file_object' not in bundle.data:
                # A client-supplied URL becomes the storage URI.
                bundle.data['uri'] = bundle.data['url']
            del(bundle.data['url'])
        datafile = bundle.related_obj
        bundle.obj.datafile = datafile
        bundle.data['datafile'] = datafile
        if 'location' in bundle.data:
            try:
                bundle.obj.storage_box = StorageBox.objects.get(
                    name=bundle.data['location'])
            except StorageBox.DoesNotExist:
                # Unknown location name: fall back to the file's default box.
                bundle.obj.storage_box = datafile.get_default_storage_box()
            del(bundle.data['location'])
        else:
            bundle.obj.storage_box = datafile.get_default_storage_box()
        bundle.obj.save()
        if 'file_object' in bundle.data:
            bundle.obj.file_object = bundle.data['file_object']
            bundle.data['file_object'].close()
            del(bundle.data['file_object'])
            # Re-fetch so the bundle reflects state persisted by the
            # file_object assignment above.
            bundle.obj = DataFileObject.objects.get(id=bundle.obj.id)
        return bundle

    def dehydrate(self, bundle):
        # Expose the storage box name as 'location' for API clients.
        dfo = bundle.obj
        bundle.data['location'] = dfo.storage_box.name
        return bundle
class ObjectACLResource(MyTardisModelResource):
    """API resource for ObjectACL entries (per-object access control)."""
    content_object = GenericForeignKeyField({
        Experiment: ExperimentResource,
        # ...
    }, 'content_object')

    class Meta:
        authentication = default_authentication
        authorization = ACLAuthorization()
        queryset = ObjectACL.objects.all()
        filtering = {
            'pluginId': ('exact', ),
            'entityId': ('exact', ),
        }

    def hydrate(self, bundle):
        # Fill in the content type.
        if bundle.data['content_type'] == 'experiment':
            experiment = Experiment.objects.get(pk=bundle.data['object_id'])
            bundle.obj.content_type = experiment.get_ct()
        else:
            # Only experiment ACLs are supported via the API so far.
            raise NotImplementedError(str(bundle.obj))
        return bundle
| |
'''
python-mtdev - Python binding to the mtdev library (MIT license)
The mtdev library transforms all variants of kernel MT events to the
slotted type B protocol. The events put into mtdev may be from any MT
device, specifically type A without contact tracking, type A with
contact tracking, or type B with contact tracking. See the kernel
documentation for further details.
'''
import os
from ctypes import cdll, Structure, c_ulong, c_int, c_ushort, \
c_void_p, pointer, POINTER, byref
# load library
# NOTE(review): resolved at import time; raises OSError if libmtdev.so is
# not installed on the system.
libmtdev = cdll.LoadLibrary('libmtdev.so')
# from linux/input.h
# ABS_MT_* multitouch axis codes (reported with event type EV_ABS).
MTDEV_CODE_SLOT = 0x2f  # MT slot being modified
MTDEV_CODE_TOUCH_MAJOR = 0x30  # Major axis of touching ellipse
MTDEV_CODE_TOUCH_MINOR = 0x31  # Minor axis (omit if circular)
MTDEV_CODE_WIDTH_MAJOR = 0x32  # Major axis of approaching ellipse
MTDEV_CODE_WIDTH_MINOR = 0x33  # Minor axis (omit if circular)
MTDEV_CODE_ORIENTATION = 0x34  # Ellipse orientation
MTDEV_CODE_POSITION_X = 0x35  # Center X ellipse position
MTDEV_CODE_POSITION_Y = 0x36  # Center Y ellipse position
MTDEV_CODE_TOOL_TYPE = 0x37  # Type of touching device
MTDEV_CODE_BLOB_ID = 0x38  # Group a set of packets as a blob
MTDEV_CODE_TRACKING_ID = 0x39  # Unique ID of initiated contact
MTDEV_CODE_PRESSURE = 0x3a  # Pressure on contact area

# Plain (single-touch) absolute axes.
MTDEV_CODE_ABS_X = 0x00
MTDEV_CODE_ABS_Y = 0x01
MTDEV_CODE_ABS_Z = 0x02

# BTN_* key codes for tool/touch state (event type EV_KEY).
MTDEV_CODE_BTN_DIGI = 0x140
MTDEV_CODE_BTN_TOOL_PEN = 0x140
MTDEV_CODE_BTN_TOOL_RUBBER = 0x141
MTDEV_CODE_BTN_TOOL_BRUSH = 0x142
MTDEV_CODE_BTN_TOOL_PENCIL = 0x143
MTDEV_CODE_BTN_TOOL_AIRBRUSH = 0x144
MTDEV_CODE_BTN_TOOL_FINGER = 0x145
MTDEV_CODE_BTN_TOOL_MOUSE = 0x146
MTDEV_CODE_BTN_TOOL_LENS = 0x147
MTDEV_CODE_BTN_TOUCH = 0x14a
MTDEV_CODE_BTN_STYLUS = 0x14b
MTDEV_CODE_BTN_STYLUS2 = 0x14c
MTDEV_CODE_BTN_TOOL_DOUBLETAP = 0x14d
MTDEV_CODE_BTN_TOOL_TRIPLETAP = 0x14e
MTDEV_CODE_BTN_TOOL_QUADTAP = 0x14f  # Four fingers on trackpad

# Event types (EV_*). The duplicate early definition of MTDEV_TYPE_EV_ABS
# that preceded this list has been removed; the value is unchanged (0x03).
MTDEV_TYPE_EV_SYN = 0x00
MTDEV_TYPE_EV_KEY = 0x01
MTDEV_TYPE_EV_REL = 0x02
MTDEV_TYPE_EV_ABS = 0x03
MTDEV_TYPE_EV_MSC = 0x04
MTDEV_TYPE_EV_SW = 0x05
MTDEV_TYPE_EV_LED = 0x11
MTDEV_TYPE_EV_SND = 0x12
MTDEV_TYPE_EV_REP = 0x14
MTDEV_TYPE_EV_FF = 0x15
MTDEV_TYPE_EV_PWR = 0x16
MTDEV_TYPE_EV_FF_STATUS = 0x17

# Indices into mtdev_caps.abs / has_abs (mtdev's internal axis ordering).
MTDEV_ABS_TRACKING_ID = 9
MTDEV_ABS_POSITION_X = 5
MTDEV_ABS_POSITION_Y = 6
MTDEV_ABS_TOUCH_MAJOR = 0
MTDEV_ABS_TOUCH_MINOR = 1
MTDEV_ABS_WIDTH_MAJOR = 2
MTDEV_ABS_WIDTH_MINOR = 3
MTDEV_ABS_ORIENTATION = 4
MTDEV_ABS_SIZE = 11
class timeval(Structure):
    """ctypes mirror of struct timeval (event timestamp)."""
    _fields_ = [
        ('tv_sec', c_ulong),
        ('tv_usec', c_ulong)
    ]
class input_event(Structure):
    """ctypes mirror of struct input_event from linux/input.h."""
    _fields_ = [
        ('time', timeval),
        ('type', c_ushort),
        ('code', c_ushort),
        ('value', c_int)
    ]
class input_absinfo(Structure):
    """ctypes mirror of struct input_absinfo (range/metadata of an abs axis)."""
    _fields_ = [
        ('value', c_int),
        ('minimum', c_int),
        ('maximum', c_int),
        ('fuzz', c_int),
        ('flat', c_int),
        ('resolution', c_int)
    ]
class mtdev_caps(Structure):
    """ctypes mirror of struct mtdev_caps (device capability flags and the
    absinfo of each supported MT axis)."""
    _fields_ = [
        ('has_mtdata', c_int),
        ('has_slot', c_int),
        ('has_abs', c_int * MTDEV_ABS_SIZE),
        ('slot', input_absinfo),
        ('abs', input_absinfo * MTDEV_ABS_SIZE)
    ]
class mtdev(Structure):
    """ctypes mirror of struct mtdev (capabilities + opaque converter state)."""
    _fields_ = [
        ('caps', mtdev_caps),
        ('state', c_void_p)
    ]
# binding
# C prototypes for the libmtdev entry points used by Device below.
mtdev_open = libmtdev.mtdev_open
mtdev_open.argtypes = [POINTER(mtdev), c_int]
mtdev_get = libmtdev.mtdev_get
mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]
mtdev_idle = libmtdev.mtdev_idle
mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]
mtdev_close = libmtdev.mtdev_close
mtdev_close.argtypes = [POINTER(mtdev)]
class Device:
    """Wrapper around one kernel input device run through the mtdev
    converter (all MT events come back as slotted type B protocol)."""

    def __init__(self, filename):
        """Open `filename` (an evdev node) non-blocking and attach mtdev.

        Raises Exception if mtdev cannot use the device.
        """
        self._filename = filename
        self._fd = -1
        self._device = mtdev()
        self._fd = os.open(filename, os.O_NONBLOCK | os.O_RDONLY)
        ret = mtdev_open(pointer(self._device), self._fd)
        if ret != 0:
            os.close(self._fd)
            self._fd = -1
            raise Exception('Unable to open device')

    def close(self):
        '''Close the mtdev converter
        '''
        if self._fd == -1:
            return
        # BUG FIX: the original called mtdev_close(POINTER(self._device)).
        # POINTER() constructs a pointer *type* and raises TypeError when
        # given an instance; pointer() creates a pointer to the instance,
        # matching the other mtdev_* calls in this class.
        mtdev_close(pointer(self._device))
        os.close(self._fd)
        self._fd = -1

    def idle(self, ms):
        '''Check state of kernel device
        :Parameters:
            `ms` : int
                Number of milliseconds to wait for activity
        :Return:
            Return True if the device is idle, i.e, there are no fetched events
            in the pipe and there is nothing to fetch from the device.
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        return bool(mtdev_idle(pointer(self._device), self._fd, ms))

    def get(self):
        '''Fetch one converted input_event, or None if nothing is pending.
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        ev = input_event()
        if mtdev_get(pointer(self._device), self._fd, byref(ev), 1) <= 0:
            return None
        return ev

    def has_mtdata(self):
        '''Return True if the device has multitouch data.
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        return bool(self._device.caps.has_mtdata)

    def has_slot(self):
        '''Return True if the device has slot information.
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        return bool(self._device.caps.has_slot)

    def has_abs(self, index):
        '''Return True if the device has abs data.
        :Parameters:
            `index` : int
                One of const starting with a name ABS_MT_
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        if index < 0 or index >= MTDEV_ABS_SIZE:
            raise IndexError('Invalid index')
        return bool(self._device.caps.has_abs[index])

    def get_max_abs(self):
        '''Return the maximum number of abs information available.
        '''
        return MTDEV_ABS_SIZE

    def get_slot(self):
        '''Return the slot data.
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        if self._device.caps.has_slot == 0:
            # Implicit None when the device reports no slot capability.
            return
        return self._device.caps.slot

    def get_abs(self, index):
        '''Return the abs data.
        :Parameters:
            `index` : int
                One of const starting with a name ABS_MT_
        '''
        if self._fd == -1:
            raise Exception('Device closed')
        if index < 0 or index >= MTDEV_ABS_SIZE:
            raise IndexError('Invalid index')
        return self._device.caps.abs[index]
| |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for video."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import contrib
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import summary_op_util # pylint: disable=g-direct-tensorflow-import
# After tf-nightly 1.14.1.dev20190314 summary_op_util.skip_summary was extracted
# out to the distribute module.
try:
from tensorflow.python.distribute import summary_op_util as distribute_summary_op_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
except ImportError:
distribute_summary_op_util = summary_op_util
tfl = common_layers.layers()
def swap_time_and_batch_axes(inputs):
    """Swaps time and batch axis (the first two axis)."""
    tail_axes = tf.range(2, tf.rank(inputs))
    permutation = tf.concat([[1, 0], tail_axes], axis=0)
    return tf.transpose(inputs, permutation)
def encode_to_shape(inputs, shape, scope):
    """Encode the given tensor to given image shape.

    Flattens `inputs`, projects to shape[1]*shape[2] units with a dense
    layer, and reshapes into a single-channel image (batch, w, h, 1).
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        w, h = shape[1], shape[2]
        x = inputs
        x = tfl.flatten(x)
        x = tfl.dense(x, w * h, activation=None, name="enc_dense")
        x = tf.reshape(x, (-1, w, h, 1))
        return x
def decode_to_shape(inputs, shape, scope):
    """Decode the given tensor to a (batch, 1, shape[2]) tensor.

    Flattens `inputs`, projects to shape[2] units with a dense layer, and
    restores a singleton time axis. (Original docstring was a copy-paste of
    encode_to_shape's.)
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        x = inputs
        x = tfl.flatten(x)
        x = tfl.dense(x, shape[2], activation=None, name="dec_dense")
        x = tf.expand_dims(x, axis=1)
        return x
def basic_lstm(inputs, state, num_units, name=None):
    """Basic LSTM. Runs one BasicLSTMCell step; zero state when `state` is
    None. Returns (outputs, new_state)."""
    input_shape = common_layers.shape_list(inputs)
    # reuse parameters across time-steps.
    cell = tf.nn.rnn_cell.BasicLSTMCell(
        num_units, name=name, reuse=tf.AUTO_REUSE)
    if state is None:
        state = cell.zero_state(input_shape[0], tf.float32)
    outputs, new_state = cell(inputs, state)
    return outputs, new_state
def lstm_cell(inputs,
              state,
              num_units,
              use_peepholes=False,
              cell_clip=0.0,
              initializer=None,
              num_proj=None,
              num_unit_shards=None,
              num_proj_shards=None,
              reuse=None,
              name=None):
    """Full LSTM cell.

    Runs one tf.nn.rnn_cell.LSTMCell step (state_is_tuple=False, so the
    state is a single concatenated tensor); creates a zero state when
    `state` is None. Returns (outputs, new_state).
    """
    input_shape = common_layers.shape_list(inputs)
    cell = tf.nn.rnn_cell.LSTMCell(num_units,
                                   use_peepholes=use_peepholes,
                                   cell_clip=cell_clip,
                                   initializer=initializer,
                                   num_proj=num_proj,
                                   num_unit_shards=num_unit_shards,
                                   num_proj_shards=num_proj_shards,
                                   reuse=reuse,
                                   name=name,
                                   state_is_tuple=False)
    if state is None:
        state = cell.zero_state(input_shape[0], tf.float32)
    outputs, new_state = cell(inputs, state)
    return outputs, new_state
def conv_lstm_2d(inputs, state, output_channels,
                 kernel_size=5, name=None, spatial_dims=None):
    """2D Convolutional LSTM.

    Runs one ConvLSTMCell step; `spatial_dims`, when given, overrides the
    spatial shape inferred from `inputs`. Returns (outputs, new_state).
    """
    input_shape = common_layers.shape_list(inputs)
    batch_size, input_channels = input_shape[0], input_shape[-1]
    if spatial_dims is None:
        input_shape = input_shape[1:]
    else:
        input_shape = spatial_dims + [input_channels]
    cell = contrib.rnn().ConvLSTMCell(
        2, input_shape, output_channels, [kernel_size, kernel_size], name=name)
    if state is None:
        state = cell.zero_state(batch_size, tf.float32)
    outputs, new_state = cell(inputs, state)
    return outputs, new_state
def scheduled_sample_count(ground_truth_x,
                           generated_x,
                           batch_size,
                           scheduled_sample_var):
    """Sample batch with specified mix of groundtruth and generated data points.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size
        scheduled_sample_var: number of ground-truth examples to include in
            batch.
    Returns:
        New batch with num_ground_truth sampled from ground_truth_x and the
        rest from generated_x.
    """
    num_ground_truth = scheduled_sample_var
    # Shuffle batch indices, take the first num_ground_truth for the ground
    # truth examples and the remainder for generated ones.
    idx = tf.random_shuffle(tf.range(batch_size))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))
    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    # Re-interleave the two groups back into original batch positions.
    output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                               [ground_truth_examps, generated_examps])
    # if batch size is known set it.
    if isinstance(batch_size, int):
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
def inject_additional_input(layer, inputs, name, mode="concat"):
    """Injects the additional input into the layer.

    Args:
        layer: layer that the input should be injected to.
        inputs: inputs to be injected.
        name: TF scope name.
        mode: how the info should be added to the layer:
            "concat" concats as additional channels.
            "multiplicative" broadcasts inputs and multiply them to the
                channels.
            "multi_additive" broadcasts inputs and multiply and add to the
                channels.
    Returns:
        updated layer.
    Raises:
        ValueError: in case of unknown mode.
    """
    layer_shape = common_layers.shape_list(layer)
    input_shape = common_layers.shape_list(inputs)
    # Adding zeros_mask broadcasts the (dense-projected) inputs up to the
    # layer's full spatial shape.
    zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
    if mode == "concat":
        emb = encode_to_shape(inputs, layer_shape, name)
        layer = tf.concat(values=[layer, emb], axis=-1)
    elif mode == "multiplicative":
        filters = layer_shape[-1]
        input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
        input_mask = tf.layers.dense(input_reshaped, filters, name=name)
        input_broad = input_mask + zeros_mask
        layer *= input_broad
    elif mode == "multi_additive":
        filters = layer_shape[-1]
        input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
        input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul")
        layer *= tf.nn.sigmoid(input_mul)
        input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add")
        layer += input_add
    else:
        raise ValueError("Unknown injection mode: %s" % mode)
    return layer
def scheduled_sample_prob(ground_truth_x,
                          generated_x,
                          batch_size,
                          scheduled_sample_var):
    """Probability based scheduled sampling.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size
        scheduled_sample_var: probability of choosing from ground_truth.
    Returns:
        New batch with randomly selected data points.
    """
    probability_threshold = scheduled_sample_var
    # Per-example coin flip against the threshold.
    probability_of_generated = tf.random_uniform([batch_size])
    return tf.where(probability_of_generated > probability_threshold,
                    generated_x, ground_truth_x)
def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
    """Apply dynamic neural advection to previous image.

    Args:
        prev_image: previous image to be transformed.
        dna_input: hidden layer to be used for computing DNA transformation.
        dna_kernel_size: dna kernel size.
        relu_shift: shift for ReLU function.
    Returns:
        List of images transformed by the predicted CDNA kernels.
    """
    # Construct translated images.
    prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
    image_height = int(prev_image.get_shape()[1])
    image_width = int(prev_image.get_shape()[2])
    inputs = []
    # One shifted copy of the padded image per kernel tap, stacked on a new
    # axis 3.
    for xkern in range(dna_kernel_size):
        for ykern in range(dna_kernel_size):
            inputs.append(
                tf.expand_dims(
                    tf.slice(prev_image_pad, [0, xkern, ykern, 0],
                             [-1, image_height, image_width, -1]), [3]))
    inputs = tf.concat(axis=3, values=inputs)
    # Normalize channels to 1.
    kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
    kernel = tf.expand_dims(
        kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
    return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
                        dna_kernel_size, relu_shift):
    """Apply convolutional dynamic neural advection to previous image.

    Args:
        prev_image: previous image to be transformed.
        cdna_input: hidden layer to be used for computing CDNA kernels.
        num_masks: number of masks and hence the number of CDNA
            transformations.
        color_channels: the number of color channels in the images.
        dna_kernel_size: dna kernel size.
        relu_shift: shift for ReLU function.
    Returns:
        List of images transformed by the predicted CDNA kernels.
    """
    batch_size = tf.shape(cdna_input)[0]
    height = int(prev_image.get_shape()[1])
    width = int(prev_image.get_shape()[2])
    # Predict kernels using linear function of last hidden layer.
    cdna_kerns = tfl.dense(
        cdna_input, dna_kernel_size * dna_kernel_size * num_masks,
        name="cdna_params",
        activation=None)
    # Reshape and normalize.
    cdna_kerns = tf.reshape(
        cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])
    cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift)
    norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
    cdna_kerns /= norm_factor
    # Treat the color channel dimension as the batch dimension since the same
    # transformation is applied to each color channel.
    # Treat the batch dimension as the channel dimension so that
    # depthwise_conv2d can apply a different transformation to each sample.
    cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
    cdna_kerns = tf.reshape(
        cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])
    # Swap the batch and channel dimensions.
    prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
    # Transform image.
    transformed = tf.nn.depthwise_conv2d(
        prev_image, cdna_kerns, [1, 1, 1, 1], "SAME")
    # Transpose the dimensions to where they belong.
    transformed = tf.reshape(
        transformed, [color_channels, height, width, batch_size, num_masks])
    transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
    # Split the mask axis into a Python list of per-mask images.
    transformed = tf.unstack(transformed, axis=-1)
    return transformed
def vgg_layer(inputs,
              nout,
              kernel_size=3,
              activation=tf.nn.leaky_relu,
              padding="SAME",
              is_training=True,
              has_batchnorm=False,
              scope=None):
    """A layer of VGG network with batch norm.

    Args:
        inputs: image tensor
        nout: number of output channels
        kernel_size: size of the kernel
        activation: activation function
        padding: padding of the image
        is_training: whether it is training mode or not
        has_batchnorm: whether batchnorm is applied or not
        scope: variable scope of the op
    Returns:
        net: output of layer
    """
    with tf.variable_scope(scope):
        # Conv -> (optional) batch norm -> activation.
        net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,
                         activation=None, name="conv")
        if has_batchnorm:
            net = tfl.batch_normalization(net, training=is_training, name="bn")
        net = activation(net)
    return net
def tile_and_concat(image, latent, concat_latent=True):
    """Tile latent and concatenate to image across depth.

    Args:
        image: 4-D Tensor, (batch_size X height X width X channels)
        latent: 2-D Tensor, (batch_size X latent_dims)
        concat_latent: If set to False, the image is returned as is.
    Returns:
        concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
            latent tiled and concatenated to the image across the channels.
    """
    if not concat_latent:
        return image
    image_shape = common_layers.shape_list(image)
    latent_shape = common_layers.shape_list(latent)
    height, width = image_shape[1], image_shape[2]
    latent_dims = latent_shape[1]
    # Tile the latent along height (latent_dims at a time) and pad the
    # remainder so the result matches the image's height exactly.
    height_multiples = height // latent_dims
    pad = height - (height_multiples * latent_dims)
    latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
    latent = tf.tile(latent, (1, height_multiples, width, 1))
    latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]])
    return tf.concat([image, latent], axis=-1)
def _encode_gif(images, fps):
    """Encodes numpy images into gif string.

    Args:
        images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
            `[time, height, width, channels]` where `channels` is 1 or 3.
        fps: frames per second of the animation
    Returns:
        The encoded gif string.
    Raises:
        IOError: If the ffmpeg command returns an error.
    """
    # Delegates to WholeVideoWriter (ffmpeg-backed, defined elsewhere in this
    # module).
    writer = WholeVideoWriter(fps)
    writer.write_multi(images)
    return writer.finish()
def ffmpeg_works():
    """Tries to encode images with ffmpeg to check if it works."""
    probe = np.zeros((2, 32, 32, 3), dtype=np.uint8)
    try:
        _encode_gif(probe, 2)
    except (IOError, OSError):
        return False
    return True
def py_gif_summary(tag, images, max_outputs, fps, return_summary_value=False):
    """Outputs a `Summary` protocol buffer with gif animations.

    Args:
        tag: Name of the summary.
        images: A 5-D `uint8` `np.array` of shape `[batch_size, time, height,
            width, channels]` where `channels` is 1 or 3.
        max_outputs: Max number of batch elements to generate gifs for.
        fps: frames per second of the animation.
        return_summary_value: If set to True, return a list of
            tf.Summary.Value objects in addition to the protocol buffer.
    Returns:
        The serialized `Summary` protocol buffer.
    Raises:
        ValueError: If `images` is not a 5-D `uint8` array with 1 or 3
            channels.
    """
    images = np.asarray(images)
    if images.dtype != np.uint8:
        raise ValueError("Tensor must have dtype uint8 for gif summary.")
    if images.ndim != 5:
        raise ValueError("Tensor must be 5-D for gif summary.")
    batch_size, _, height, width, channels = images.shape
    if channels not in (1, 3):
        raise ValueError("Tensors must have 1 or 3 channels for gif summary.")
    summ = tf.Summary()
    all_summ_values = []
    num_outputs = min(batch_size, max_outputs)
    for i in range(num_outputs):
        image_summ = tf.Summary.Image()
        image_summ.height = height
        image_summ.width = width
        image_summ.colorspace = channels  # 1: grayscale, 3: RGB
        try:
            image_summ.encoded_image_string = _encode_gif(images[i], fps)
        except (IOError, OSError) as e:
            # ffmpeg unavailable or failed: fall back to a PNG of the first
            # frame (requires PIL), else emit an empty image.
            tf.logging.warning(
                "Unable to encode images to a gif string because either ffmpeg is "
                "not installed or ffmpeg returned an error: %s. Falling back to an "
                "image summary of the first frame in the sequence.", e)
            try:
                from PIL import Image  # pylint: disable=g-import-not-at-top
                import io  # pylint: disable=g-import-not-at-top
                with io.BytesIO() as output:
                    Image.fromarray(images[i][0]).save(output, "PNG")
                    image_summ.encoded_image_string = output.getvalue()
            except ImportError as e:
                tf.logging.warning(
                    "Gif summaries requires ffmpeg or PIL to be installed: %s", e)
                image_summ.encoded_image_string = ""
        # Single output keeps a stable tag; multiple outputs get an index
        # suffix.
        if num_outputs == 1:
            summ_tag = "{}/gif".format(tag)
        else:
            summ_tag = "{}/gif/{}".format(tag, i)
        curr_summ_value = tf.Summary.Value(tag=summ_tag, image=image_summ)
        all_summ_values.append(curr_summ_value)
        summ.value.add(tag=summ_tag, image=image_summ)
    summ_str = summ.SerializeToString()
    if return_summary_value:
        return all_summ_values, summ_str
    return summ_str
def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None,
                family=None):
    """Outputs a `Summary` protocol buffer with gif animations.

    Args:
        name: Name of the summary.
        tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height,
            width, channels]` where `channels` is 1 or 3.
        max_outputs: Max number of batch elements to generate gifs for.
        fps: frames per second of the animation
        collections: Optional list of tf.GraphKeys. The collections to add
            the summary to. Defaults to [tf.GraphKeys.SUMMARIES]
        family: Optional; if provided, used as the prefix of the summary tag
            name, which controls the tab name used for display on
            Tensorboard.
    Returns:
        A scalar `Tensor` of type `string`. The serialized `Summary` protocol
        buffer.
    Raises:
        ValueError: if the given tensor has the wrong shape.
    """
    tensor = tf.convert_to_tensor(tensor)
    if len(tensor.get_shape()) != 5:
        raise ValueError("Assuming videos given as tensors in the format "
                         "[batch, time, height, width, channels] but got one "
                         "of shape: %s" % str(tensor.get_shape()))
    tensor = tf.cast(tensor, tf.uint8)
    # Skip on replicas that should not produce summaries (distribution
    # strategies).
    if distribute_summary_op_util.skip_summary():
        return tf.constant("")
    with summary_op_util.summary_scope(
            name, family, values=[tensor]) as (tag, scope):
        # Encoding happens in Python at session run time via py_func.
        val = tf.py_func(
            py_gif_summary,
            [tag, tensor, max_outputs, fps],
            tf.string,
            stateful=False,
            name=scope)
        summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
    return val
def tinyify(array, tiny_mode, small_mode):
    """Shrink a list of layer sizes for test configurations.

    tiny_mode collapses every entry to 1; small_mode quarters each entry
    (minimum 1); otherwise the list is returned unchanged.
    """
    if tiny_mode:
        return [1] * len(array)
    if small_mode:
        shrunk = []
        for size in array:
            shrunk.append(max(size // 4, 1))
        return shrunk
    return array
def get_gaussian_tensor(mean, log_var):
    """Sample from N(mean, exp(log_var)) via the reparameterization trick."""
    noise = tf.random_normal(tf.shape(mean), 0, 1, dtype=tf.float32)
    return mean + tf.exp(log_var / 2.0) * noise
def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5,
                      is_training=False, random_latent=False,
                      tiny_mode=False, small_mode=False):
    """Builds convolutional latent tower for stochastic model.

    At training time this tower generates a latent distribution (mean and std)
    conditioned on the entire video. This latent variable will be fed to the
    main tower as an extra variable to be used for future frames prediction.
    At inference time, the tower is disabled and only returns latents sampled
    from N(0,1).
    If the multi_latent flag is on, a different latent for every timestep would
    be generated.

    Args:
        images: tensor of ground truth image sequences
        time_axis: the time axis in images tensor
        latent_channels: number of latent channels
        min_logvar: minimum value for log_var
        is_training: whether or not it is training mode
        random_latent: whether or not generate random latents
        tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number
            of conv channels to 1 at each layer. useful for testing the
            integration tests.
        small_mode: whether or not it is small_mode. small mode is the same
            model with less conv and lstm layers and also lower number of
            channels. suitable for videos with less complexity and testing.
    Returns:
        latent_mean: predicted latent mean
        latent_logvar: predicted latent log variance
    """
    conv_size = tinyify([32, 64, 64], tiny_mode, small_mode)
    with tf.variable_scope("latent", reuse=tf.AUTO_REUSE):
        # Fold the time axis into the channel axis so the tower conditions on
        # the whole video at once.
        images = tf.to_float(images)
        images = tf.unstack(images, axis=time_axis)
        images = tf.concat(images, axis=3)
        x = images
        x = common_layers.make_even_size(x)
        x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2),
                       padding="SAME", activation=tf.nn.relu, name="latent_conv1")
        x = contrib.layers().layer_norm(x)
        if not small_mode:
            x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
                           padding="SAME", activation=tf.nn.relu, name="latent_conv2")
            x = contrib.layers().layer_norm(x)
        x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1),
                       padding="SAME", activation=tf.nn.relu, name="latent_conv3")
        x = contrib.layers().layer_norm(x)
        nc = latent_channels
        mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
                          padding="SAME", activation=None, name="latent_mean")
        logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
                          padding="SAME", activation=tf.nn.relu, name="latent_std")
        # ReLU output is >= 0, so logvar is bounded below by min_logvar.
        logvar = logv + min_logvar
        # No latent tower at inference time, just standard gaussian.
        if not is_training:
            return tf.zeros_like(mean), tf.zeros_like(logvar)
        # No latent in the first phase
        ret_mean, ret_logvar = tf.cond(
            random_latent,
            lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)),
            lambda: (mean, logvar))
        return ret_mean, ret_logvar
def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
  """Get KL multiplier (beta) based on the schedule.

  Args:
    schedule: str, one of "constant", "linear" or
      "noisy_linear_cosine_decay".
    global_step: scalar int Tensor, current training step.
    final_beta: float, the beta value after the annealing period ends.
    decay_start: int, global step at which annealing starts.
    decay_end: int, global step at which beta reaches final_beta.

  Returns:
    beta: scalar float Tensor, the current KL multiplier.

  Raises:
    ValueError: if decay_start > decay_end, or the schedule is unknown.
  """
  if decay_start > decay_end:
    # Fixed message: it previously read "decay_end is smaller than decay_end".
    raise ValueError("decay_end is smaller than decay_start.")

  # Since some of the TF schedules do not support incrementing a value,
  # in all of the schedules, we anneal the beta from final_beta to zero
  # and then reverse it at the bottom.
  if schedule == "constant":
    decayed_value = 0.0
  elif schedule == "linear":
    decayed_value = tf.train.polynomial_decay(
        learning_rate=final_beta,
        global_step=global_step - decay_start,
        decay_steps=decay_end - decay_start,
        end_learning_rate=0.0)
  elif schedule == "noisy_linear_cosine_decay":
    decayed_value = tf.train.noisy_linear_cosine_decay(
        learning_rate=final_beta,
        global_step=global_step - decay_start,
        decay_steps=decay_end - decay_start)
  # TODO(mechcoder): Add log_annealing schedule.
  else:
    raise ValueError("Unknown beta schedule.")

  # Reverse the annealed-to-zero curve so beta grows from 0 to final_beta.
  increased_value = final_beta - decayed_value
  increased_value = tf.maximum(0.0, increased_value)

  # Flat 0 before decay_start, flat final_beta after decay_end,
  # the reversed decay curve in between.
  beta = tf.case(
      pred_fn_pairs={
          tf.less(global_step, decay_start): lambda: 0.0,
          tf.greater(global_step, decay_end): lambda: final_beta},
      default=lambda: increased_value)
  return beta
def extract_random_video_patch(videos, num_frames=-1):
  """For every video, extract a random consecutive patch of num_frames.

  Each batch entry gets an independently sampled start frame; the patch is
  assembled with a single gather_nd over (batch, time) index pairs.

  Args:
    videos: 5-D Tensor, (NTHWC)
    num_frames: Integer, if -1 then the entire video is returned.
  Returns:
    video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
  Raises:
    ValueError: If num_frames is greater than the number of total frames in
      the video.
  """
  if num_frames == -1:
    return videos
  batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
  # NOTE(review): this Python-level comparison assumes the time dimension is
  # static; shape_list can yield a Tensor for dynamic dims — confirm callers.
  if num_total_frames < num_frames:
    raise ValueError("Expected num_frames <= %d, got %d" %
                     (num_total_frames, num_frames))

  # Randomly choose start_inds for each video.
  frame_start = tf.random_uniform(
      shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,
      dtype=tf.int32)

  # [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...
  # [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]
  range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
  frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
  frame_inds = tf.reshape(frame_inds, [-1])

  # [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames
  batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
  batch_inds = tf.tile(batch_inds, [1, num_frames])
  batch_inds = tf.reshape(batch_inds, [-1])

  # Pair (batch, frame) indices and gather the selected frames, then restore
  # the (N, num_frames, H, W, C) layout.
  gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
  video_patches = tf.gather_nd(videos, gather_inds)
  return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
class VideoWriter(object):
  """Abstract interface for incremental video writers."""

  def write(self, frame, encoded_frame=None):
    """Writes a single video frame. Subclasses must override."""
    raise NotImplementedError

  def write_multi(self, frames, encoded_frames=None):
    """Writes a sequence of frames by delegating to write()."""
    if encoded_frames is None:
      # No encodings supplied: pair every frame with None via an endless
      # stream, so zip() below is driven purely by `frames`.
      encoded_frames = iter(lambda: None, 1)
    for raw, encoded in zip(frames, encoded_frames):
      self.write(raw, encoded)

  def finish(self):
    """Finishes writing frames and returns output, if any.

    Frees any resources acquired by the writer.
    """
    pass

  def save_to_disk(self, output):
    """Saves output to disk.

    Args:
      output: result of finish().
    """
    raise NotImplementedError

  def finish_to_disk(self):
    """Finishes writing frames and saves output to disk, if any."""
    result = self.finish()  # pylint: disable=assignment-from-no-return
    if result is not None:
      self.save_to_disk(result)

  def __del__(self):
    """Frees any resources acquired by the writer."""
    self.finish()
class WholeVideoWriter(VideoWriter):
  """Helper class for writing whole videos.

  Raw frames are piped into an ffmpeg subprocess over stdin; the transcoded
  video is drained from stdout by a background reader thread and returned
  from finish() as bytes.
  """

  def __init__(self, fps, output_path=None, file_format="gif"):
    self.fps = fps
    self.output_path = output_path  # target for save_to_disk(); may be None
    self.file_format = file_format  # ffmpeg output format name, e.g. "gif"
    self.proc = None  # lazily-started ffmpeg process (see __init_ffmpeg)
    self._out_chunks = []  # stdout chunks collected by the reader thread
    self._err_chunks = []  # stderr chunks collected by the reader thread
    self._out_thread = None
    self._err_thread = None

  def __init_ffmpeg(self, image_shape):
    """Initializes ffmpeg to write frames."""
    import itertools  # pylint: disable=g-import-not-at-top
    from subprocess import Popen, PIPE  # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member
    ffmpeg = "ffmpeg"
    height, width, channels = image_shape
    # Input is raw (unencoded) frames on stdin; the filter_complex chain
    # generates a palette from the stream and applies it on output.
    self.cmd = [
        ffmpeg, "-y",
        "-f", "rawvideo",
        "-vcodec", "rawvideo",
        "-r", "%.02f" % self.fps,
        "-s", "%dx%d" % (width, height),
        "-pix_fmt", {1: "gray", 3: "rgb24"}[channels],
        "-i", "-",
        "-filter_complex", "[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];"
                           "[w][y]paletteuse,fifo",
        "-r", "%.02f" % self.fps,
        "-f", self.file_format,
        "-qscale", "0",
        "-"
    ]
    self.proc = Popen(
        self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1
    )
    # Drain stdout and stderr concurrently so the pipes cannot fill up and
    # deadlock the subprocess while we are still feeding frames.
    (self._out_thread, self._err_thread) = itertools.starmap(
        self._start_reader_thread, [
            (self.proc.stdout, self._out_chunks),
            (self.proc.stderr, self._err_chunks)
        ]
    )

  def _start_reader_thread(self, stream, chunks):
    """Starts a thread for reading output from FFMPEG.

    The thread reads consecutive chunks from the stream and saves them in
    the given list.

    Args:
      stream: output stream of the FFMPEG process.
      chunks: list to save output chunks to.

    Returns:
      Thread
    """
    import io  # pylint: disable=g-import-not-at-top
    import threading  # pylint: disable=g-import-not-at-top
    def target():
      while True:
        chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
        if not chunk:
          break
        chunks.append(chunk)
    thread = threading.Thread(target=target)
    thread.start()
    return thread

  def write(self, frame, encoded_frame=None):
    # encoded_frame is ignored; raw pixels are piped to ffmpeg. The process
    # is started lazily so the frame shape can parametrize the command line.
    if self.proc is None:
      self.__init_ffmpeg(frame.shape)
    # NOTE(review): ndarray.tostring() is deprecated in favor of tobytes().
    self.proc.stdin.write(frame.tostring())

  def finish(self):
    """Finishes transconding and returns the video.

    Returns:
      bytes

    Raises:
      IOError: in case of transcoding error.
    """
    if self.proc is None:
      return None
    # Closing stdin signals EOF to ffmpeg; the reader threads then drain
    # the remaining output before we join them.
    self.proc.stdin.close()
    for thread in (self._out_thread, self._err_thread):
      thread.join()
    (out, err) = [
        b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks)
    ]
    self.proc.stdout.close()
    self.proc.stderr.close()
    if self.proc.returncode:
      # Include the full command line in the error for reproducibility.
      err = "\n".join([" ".join(self.cmd), err.decode("utf8")])
      raise IOError(err)
    del self.proc
    self.proc = None
    return out

  def save_to_disk(self, output):
    if self.output_path is None:
      raise ValueError(
          "This writer doesn't support saving to disk (output_path not "
          "specified)."
      )
    # NOTE(review): `output` is bytes but the file is opened in text mode
    # ("w") — confirm tf.gfile accepts bytes here, or switch to "wb".
    with tf.gfile.Open(self.output_path, "w") as f:
      f.write(output)
class BatchWholeVideoWriter(VideoWriter):
  """Writes a batch of videos, one WholeVideoWriter per batch element."""

  def __init__(self, fps, path_template, file_format="gif"):
    self.fps = fps
    self.path_template = path_template  # formatted with the batch index
    self.file_format = file_format
    self.writers = None  # created lazily on the first write()

  def write(self, batch_frame, batch_encoded_frame=None):
    del batch_encoded_frame
    if self.writers is None:
      # One per-video writer, sized by the first batch we see.
      self.writers = []
      for index in range(len(batch_frame)):
        self.writers.append(
            WholeVideoWriter(  # pylint: disable=g-complex-comprehension
                self.fps, self.path_template.format(index), self.file_format
            )
        )
    for index, frame in enumerate(batch_frame):
      self.writers[index].write(frame)

  def finish(self):
    """Finishes every sub-writer and returns their outputs as a list."""
    return [writer.finish() for writer in self.writers]

  def save_to_disk(self, outputs):
    for (writer, output) in zip(self.writers, outputs):
      writer.save_to_disk(output)
class IndividualFrameWriter(VideoWriter):
  """Writes each frame as its own numbered PNG file."""

  def __init__(self, output_dir):
    self.output_dir = output_dir
    self._counter = 0  # index of the next frame file to be written

  def write(self, frame=None, encoded_frame=None):
    import os  # pylint: disable=g-import-not-at-top
    if encoded_frame is None:
      raise ValueError("This writer only supports encoded frames.")
    filename = "frame_%05d.png" % self._counter
    path = os.path.join(self.output_dir, filename)
    with tf.gfile.Open(path, "wb") as f:
      f.write(encoded_frame)
    self._counter += 1
| |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %%
import numpy as np
import pylab as plt
import h5py
from tqdm import tqdm_notebook
from scipy import ndimage as ndi
from scipy.ndimage.morphology import distance_transform_edt
from scipy.ndimage.morphology import binary_closing, binary_fill_holes, binary_opening, binary_dilation, binary_erosion
# from skimage.morphology import watershe
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from skimage.segmentation import watershed, random_walker
from pathlib import Path
import cv2
# %%
# Paths to the reconstructed tomography datasets (each holds tomo_rec.h5).
data_folderes = ['/diskmnt/b/makov/robotom/a47e0d1d-444c-4647-b9e1-5f0a3d7441b6',
                 '/diskmnt/b/makov/robotom/cd190221-f7c9-4509-a65e-039097920945',
                 '/diskmnt/b/makov/robotom/02223e5a-1018-461d-baeb-471daa535d8f']
# %%
df_number = 2  # index of the dataset to process
df = Path(data_folderes[df_number])
data = h5py.File(df / 'tomo_rec.h5')['Reconstruction']
# Crop each dataset to its hand-picked region of interest (z, y, x slices).
if df_number == 0 :
    data = data[210:,300:1000, 200:900]
elif df_number == 1 :
    data = data[1850:2400, 400:1600, 400:1700]
elif df_number ==2:
    data = data[1790:2320, 580:1300, 500:1260]
# %%
# plt.figure()
# plt.imshow(data[1790:2320, 500:1300, data.shape[2]//2])
# plt.show()
def create_mask(im):
    """Build a binary mask of the sample area for a single slice.

    Thresholds the slice, median-filters the result to drop speckle
    noise, closes and fills interior holes, then shrinks the boundary
    by one erosion step.
    """
    mask = im > 0.01
    mask = cv2.medianBlur(mask.astype('float32'), 7)
    mask = binary_closing(mask)
    mask = binary_fill_holes(mask)
    mask = binary_erosion(mask)
    return mask
# %%
plt.figure(figsize=(10,10))
plt.imshow(data[340], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
plt.colorbar()
plt.show()
plt.figure(figsize=(10,10))
plt.imshow(create_mask(data[340]), cmap=plt.cm.gray_r)
# plt.colorbar()
plt.show()
# %%
# # !rm -rf images
# %%
out_dir = Path('pores')/str(df_number)
out_dir.mkdir(parents=True, exist_ok=True)
# %%
for i in tqdm_notebook(range(data.shape[0])):
plt.imsave(out_dir / f'0_{i}.png',data[i], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
for i in tqdm_notebook(range(data.shape[1])):
plt.imsave(out_dir / f'1_{i}.png',data[:,i,:], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
for i in tqdm_notebook(range(data.shape[2])):
plt.imsave(out_dir / f'2_{i}.png',data[:,:,i], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
# %%
# !ffmpeg -y -r 10 -i "{out_dir}/0_%d.png" -b:v 2000k {out_dir}/poly_0.avi
# !ffmpeg -y -r 10 -i "{out_dir}/1_%d.png" -b:v 2000k {out_dir}/poly_1.avi
# !ffmpeg -y -r 10 -i "{out_dir}/2_%d.png" -b:v 2000k {out_dir}/poly_2.avi
# %%
for i in tqdm_notebook(range(data.shape[0])):
if df_number == 0:
thr = 0.01
elif df_number == 1:
thr = 0.05
data[i] = cv2.medianBlur(np.asarray(data[i]>thr, dtype='float32'), 7)
# %%
# plt.figure(figsize=(12,12))
# plt.imshow(cv2.medianBlur(np.asarray(data[300,:,:]>0.05, dtype='float32'), 7))
# plt.show()
# %%
# x = data[200]
def find_pores(x, debug=False):
    """Detect pore pixels on a single reconstruction slice.

    A pixel is considered pore-like when the difference between a wide
    (7 px) and a narrow (3 px) median filter is small (the region is
    locally flat) and the intensity is low. The raw detection is cleaned
    with morphological closing/opening and restricted to the sample mask.

    Args:
        x: 2-D array, one slice of the reconstructed volume.
        debug: if True, show the slice with the pore contour overlaid.

    Returns:
        2-D array marking pore pixels (zero outside the sample mask).
    """
    x = x.copy()
    x[x < 0.01] = 0.01  # clamp the background level
    # Flatness measure: wide minus narrow median filter.
    x_m = (cv2.medianBlur(np.asarray(x, dtype='float32'), 7) -
           cv2.medianBlur(np.asarray(x, dtype='float32'), 3))
    # Removed two unused distance_transform_edt computations that were
    # discarded immediately (expensive dead code).
    pores = binary_opening(binary_closing((np.abs(x_m) < 0.004) * (x < 0.08)))
    mask = create_mask(x)
    pores = pores * mask
    if debug:
        plt.figure(figsize=(15, 15))
        plt.imshow(x)
        plt.contour(pores)
        plt.show()
    return pores
# %%
for i in range(70,350, 50):
find_pores(data[i], True)
# %%
pores = data.copy()
for i in tqdm_notebook(range(pores.shape[0])):
pores[i] = find_pores(pores[i])
# %%
pores_t = pores #[200:300, 200:500, 200:500]
# mask_t = mask[200:300, 200:500, 200:500]
pores_dtf = distance_transform_edt(pores_t)
pores_dtf_r = distance_transform_edt(1-pores_t)
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf[50])
plt.colorbar(orientation='horizontal')
plt.show()
# plt.figure(figsize=(15,15))
# plt.imshow(pores_dtf_r[50]*binary_erosion(mask, iterations=20), vmax=5)
# plt.colorbar(orientation='horizontal')
# plt.show()
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf_r[50], vmax=5)
plt.colorbar(orientation='horizontal')
plt.show()
# %%
# # #https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html#sphx-glr-auto-examples-segmentation-plot-watershed-py
# local_maxi = peak_local_max(pores_dtf, indices=False,
# threshold_abs=2, min_distance=10,# footprint=np.ones((3, 3, 3)),
# labels=pores_t)#
# markers, num_features = ndi.label(local_maxi)#, np.ones((3, 3, 3)))
# labels = watershed(-pores_dtf, markers, mask=pores_t)
# %%
#https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html#sphx-glr-auto-examples-segmentation-plot-watershed-py
# pores_t = pores[200:300, 200:500, 200:500]
# local_maxi = peak_local_max(pores_dtf, indices=False, min_distance=3)#, footprint=np.ones((3, 3, 3)))
# markers, num_features = ndi.label(pores_t)
# labels = watershed(pores_t, markers)
# %%
markers, num_features = ndi.label(pores_dtf>0, np.ones((3, 3, 3)))
num_features
# %%
import os
def reshape_volume(volume, reshape):
    """Downsample a 3-D volume by an integer factor via block averaging.

    Each output voxel is the mean of a reshape**3 cube of input voxels;
    trailing voxels that do not fill a whole cube are discarded.
    """
    out_shape = [dim // reshape for dim in volume.shape]
    acc = np.zeros(out_shape, dtype='float32')
    x_end, y_end, z_end = [dim * reshape for dim in out_shape]
    # Accumulate each of the reshape**3 phase-shifted sub-grids, then average.
    for dx, dy, dz in np.ndindex(reshape, reshape, reshape):
        acc += volume[dx:x_end:reshape, dy:y_end:reshape, dz:z_end:reshape]
    return acc / reshape ** 3
def save_amira(in_array, out_path, reshape=3):
    """Dump a (possibly downsampled) volume plus an Amira loader script.

    Writes 'amira.raw' (float32) and 'tomo.hx', an Amira script that loads
    the raw file with the matching grid dimensions.
    """
    data_path = str(out_path)
    reduced = reshape_volume(in_array, reshape)
    with open(os.path.join(data_path, 'amira.raw'), 'wb') as raw_file:
        reduced.tofile(raw_file)
    nz, ny, nx = reduced.shape
    with open(os.path.join(data_path, 'tomo.hx'), 'w') as script:
        script.write('# Amira Script\n')
        script.write('remove -all\n')
        script.write(
            r'[ load -raw ${SCRIPTDIR}/amira.raw little xfastest float 1 ' +
            '%d %d %d 0 %d 0 %d 0 %d' % (nx, ny, nz, nx - 1, ny - 1, nz - 1) +
            ' ] setLabel tomo.raw\n')
# %%
save_amira(markers, out_dir, 1)
# %%
regions=regionprops(markers)
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf[50])
# plt.colorbar(orientation='horizontal')
plt.contour(markers[50],colors='r')
plt.show()
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_t[50])
plt.contour(markers[50], colors='r')#, vmin = np.percentile(labels[200].flat, 77))
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[50])
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[:,200,:])
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[:,:,200])
# plt.colorbar(orientation='horizontal')
plt.show()
# %%
vol = [r.area for r in regions if r.area<1e7]
# #volume of each pore
# vol = np.zeros((num_features+1), dtype=int)
# for x in tqdm_notebook(labels.flat):
# vol[x] += 1
# %%
xv, yv = np.histogram(vol[1:], bins=100)
plt.figure(figsize=(15,15))
plt.semilogy(yv[1:],xv,'o')
plt.grid()
plt.show()
# %%
#Raduis of each pore
tt = local_maxi*pores_dtf #todo.fixit
xr, yr = np.histogram(tt.flat, bins=100)
xr0, yr0 = np.histogram(np.power(vol,1./3), bins=1000)
# %%
plt.figure(figsize=(15,15))
plt.semilogy(yr[1:],xr[:],'o')
plt.semilogy(yr0[2:],xr0[1:],'o')
plt.xlim([0,20])
plt.grid()
plt.show()
# %%
# %%
| |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
    """
    Attach three stacked channel axes (R, G, B) to the right of *ax*.

    pad : fraction of the axes height.
    """
    divider = make_axes_locatable(ax)

    pad_size = Size.Fraction(pad, Size.AxesY(ax))
    xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
    ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))

    # Left cell keeps the main axes; the right column is split into three
    # equal cells (separated by pad) for the channel axes.
    divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
    divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
    ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))

    if axes_class is None:
        try:
            axes_class = locatable_axes_factory(ax._axes_class)
        except AttributeError:
            axes_class = locatable_axes_factory(type(ax))

    ax_rgb = []
    for ny in [4, 2, 0]:
        channel_ax = axes_class(ax.get_figure(),
                                ax.get_position(original=True),
                                sharex=ax, sharey=ax)
        channel_ax.set_axes_locator(divider.new_locator(nx=2, ny=ny))
        # Hide tick labels on the small channel axes.
        for label in (channel_ax.yaxis.get_ticklabels()
                      + channel_ax.xaxis.get_ticklabels()):
            label.set_visible(False)
        try:
            for axis in channel_ax.axis.values():
                axis.major_ticklabels.set_visible(False)
        except AttributeError:
            pass
        ax_rgb.append(channel_ax)

    if add_all:
        fig = ax.get_figure()
        for channel_ax in ax_rgb:
            fig.add_axes(channel_ax)

    return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
    """Show an (ny, nx, 3) composite built from three single-channel arrays."""
    ny, nx = r.shape
    rgb = np.zeros((ny, nx, 3), dtype="d")
    rgb[:, :, 0] = r
    rgb[:, :, 1] = g
    rgb[:, :, 2] = b
    im_rgb = ax.imshow(rgb, **kwargs)
    return im_rgb
class RGBAxesBase(object):
    """base class for a 4-panel imshow (RGB, R, G, B)

    Layout:
    +---------------+-----+
    |               |  R  |
    +               +-----+
    |      RGB      |  G  |
    +               +-----+
    |               |  B  |
    +---------------+-----+

    Attributes
    ----------
    _defaultAxesClass : matplotlib.axes.Axes
        defaults to 'Axes' in RGBAxes child class.
        No default in abstract base class
    RGB : _defaultAxesClass
        The axes object for the three-channel imshow
    R : _defaultAxesClass
        The axes object for the red channel imshow
    G : _defaultAxesClass
        The axes object for the green channel imshow
    B : _defaultAxesClass
        The axes object for the blue channel imshow
    """
    def __init__(self, *kl, **kwargs):
        """
        Parameters
        ----------
        pad : float
            fraction of the axes height to put as padding.
            defaults to 0.0
        add_all : bool
            True: Add the {rgb, r, g, b} axes to the figure
            defaults to True.
        axes_class : matplotlib.axes.Axes
        kl :
            Unpacked into axes_class() init for RGB
        kwargs :
            Unpacked into axes_class() init for RGB, R, G, B axes
        """
        pad = kwargs.pop("pad", 0.0)
        add_all = kwargs.pop("add_all", True)
        try:
            axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
        except AttributeError:
            new_msg = ("A subclass of RGBAxesBase must have a "
                       "_defaultAxesClass attribute. If you are not sure which "
                       "axes class to use, consider using "
                       "mpl_toolkits.axes_grid1.mpl_axes.Axes.")
            # Re-raise with a clearer message while preserving the traceback.
            six.reraise(AttributeError, AttributeError(new_msg),
                        sys.exc_info()[2])

        ax = axes_class(*kl, **kwargs)
        divider = make_axes_locatable(ax)

        # Left cell holds the RGB composite; the right column is split into
        # three equal cells (separated by pad) for the channel axes.
        pad_size = Size.Fraction(pad, Size.AxesY(ax))
        xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
        ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
        divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
        divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
        ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))

        ax_rgb = []
        # ny = 4, 2, 0 are the vertical cells for R (top), G, B (bottom).
        for ny in [4, 2, 0]:
            ax1 = axes_class(ax.get_figure(),
                             ax.get_position(original=True),
                             sharex=ax, sharey=ax, **kwargs)
            locator = divider.new_locator(nx=2, ny=ny)
            ax1.set_axes_locator(locator)
            ax1.axis[:].toggle(ticklabels=False)
            ax_rgb.append(ax1)

        self.RGB = ax
        self.R, self.G, self.B = ax_rgb

        if add_all:
            fig = ax.get_figure()
            fig.add_axes(ax)
            self.add_RGB_to_figure()
        self._config_axes()

    def _config_axes(self, line_color='w', marker_edge_color='w'):
        """Set the line color and ticks for the axes

        Parameters
        ----------
        line_color : any matplotlib color
        marker_edge_color : any matplotlib color
        """
        for ax1 in [self.RGB, self.R, self.G, self.B]:
            ax1.axis[:].line.set_color(line_color)
            ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)

    def add_RGB_to_figure(self):
        """Add the red, green and blue axes to the RGB composite's axes figure
        """
        self.RGB.get_figure().add_axes(self.R)
        self.RGB.get_figure().add_axes(self.G)
        self.RGB.get_figure().add_axes(self.B)

    def imshow_rgb(self, r, g, b, **kwargs):
        """Create the four images {rgb, r, g, b}

        Parameters
        ----------
        r : array-like
            The red array
        g : array-like
            The green array
        b : array-like
            The blue array
        kwargs : imshow kwargs
            kwargs get unpacked into the imshow calls for the four images

        Returns
        -------
        rgb : matplotlib.image.AxesImage
        r : matplotlib.image.AxesImage
        g : matplotlib.image.AxesImage
        b : matplotlib.image.AxesImage
        """
        ny, nx = r.shape
        if not ((nx, ny) == g.shape == b.shape):
            raise ValueError('Input shapes do not match.'
                             '\nr.shape = {0}'
                             '\ng.shape = {1}'
                             '\nb.shape = {2}'
                             ''.format(r.shape, g.shape, b.shape))
        # Each per-channel array keeps its values in its own channel slot;
        # the composite is their sum.
        R = np.zeros([ny, nx, 3], dtype="d")
        R[:,:,0] = r
        G = np.zeros_like(R)
        G[:,:,1] = g
        B = np.zeros_like(R)
        B[:,:,2] = b
        RGB = R + G + B

        im_rgb = self.RGB.imshow(RGB, **kwargs)
        im_r = self.R.imshow(R, **kwargs)
        im_g = self.G.imshow(G, **kwargs)
        im_b = self.B.imshow(B, **kwargs)
        return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
    """4-panel RGB imshow helper using mpl_axes.Axes as the axes class."""
    _defaultAxesClass = Axes
| |
#!/usr/bin/env
"""
GOA_Winds_NARR_model_prep.py
Retrieve NARR winds for two locations:
GorePoint - 58deg 58min N, 150deg 56min W
and Globec3 59.273701N, 148.9653W
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','GLOBEC3', 'Gorept','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
    """Read every variable of a netcdf file into a dictionary.

    Returns a (data, params) tuple where params lists the variable names.
    """
    nchandle = ncutil.ncopen(infile)
    variables = ncutil.get_vars(nchandle)  # all variable names
    data = ncutil.ncreadfile_dic(nchandle, variables)
    ncutil.ncclose(nchandle)
    return (data, variables)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
    """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
    # Reads only the requested (height, lat, lon) slice of each variable.
    # Callers pass height_ind=None — presumably that keeps the axis whole;
    # verify against ncreadfile_dic_slice in ncutilities.
    ###nc readin/out
    nchandle = ncutil.ncopen(infile)
    params = ncutil.get_vars(nchandle) #gets all of them
    print "Parameters available: "
    print params
    ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
    ncutil.ncclose(nchandle)
    return ncdata
def latlon_grid(infile):
    """Return the geographic (lat/lon) coordinate arrays of a netcdf file."""
    handle = ncutil.ncopen(infile)
    coords = ncutil.get_geocoords(handle)
    ncutil.ncclose(handle)
    return coords
def write2epic( file_name, stationid, time, lat_lon, data ):
    """Write U/V wind components to an EPIC-standard NetCDF file."""
    nc = ncutil.EPIC_NC(savefile=file_name)
    nc.file_create()
    nc.sbeglobal_atts()
    nc.PMELglobal_atts(Station_Name=stationid,
                       file_name=(__file__.split('/')[-1]))
    nc.dimension_init(len_time=len(time[0]))
    nc.variable_init()
    # Longitude is flipped because moorings are expressed +W (see main body).
    nc.add_coord_data(time1=time[0], time2=time[1],
                      latitude=lat_lon[0], longitude=-1 * lat_lon[1],
                      depth_level=10.)
    nc.add_data('WU_422', data[0])
    nc.add_data('WV_423', data[1])
    nc.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
    """Convert python ordinal date(s) to EPIC (true julian day, msec) pairs."""
    # EPIC epoch 2440000 corresponds to 1968-05-23.
    epic_epoch_offset = 2440000 - datetime.datetime.toordinal(
        datetime.datetime(1968, 5, 23))
    whole_days = np.floor(file_time)
    time1 = whole_days + epic_epoch_offset  # true julian day
    time2 = (file_time - whole_days) * (1000. * 60. * 60. * 24.)  # msec since 0000GMT
    return (time1, time2)
def pythondate2str(pdate):
    """Format a fractional python ordinal date as 'YYYY-Mon-DD HH:MM:SS'."""
    (year, month, day) = datetime.datetime.fromordinal(
        int(pdate)).strftime('%Y-%b-%d').split('-')
    day_frac = pdate - int(pdate)
    hours = 24 * day_frac
    minutes = 60 * (hours - np.floor(hours))
    seconds = 60 * (minutes - np.floor(minutes))
    dhour = str(int(np.floor(hours)))
    dmin = str(int(np.floor(minutes)))
    dsec = str(int(np.floor(seconds)))
    # zero-pad single-digit time fields
    if len(dhour) == 1:
        dhour = '0' + dhour
    if len(dmin) == 1:
        dmin = '0' + dmin
    if len(dsec) == 1:
        dsec = '0' + dsec
    return year + '-' + month + '-' + day + ' ' + dhour + ':' + dmin + ':' + dsec
"---"
def rotate_coord(angle_rot, mag, dir):
    """ converts math coords to along/cross shelf.
    + onshore / along coast with land to right (right handed)
    - offshore / along coast with land to left

    Todo: convert met standard for winds (left handed coordinate system
    """
    rotated = np.deg2rad(dir - angle_rot)
    along = mag * np.sin(rotated)
    cross = mag * np.cos(rotated)
    return (along, cross)
def triangle_smoothing(data_in):
    """Apply a (1/4, 1/2, 1/4) triangular filter along the series."""
    kernel = np.array([0.25, 0.5, 0.25])
    # 'same' keeps the input length; endpoints see zero padding (edge effects).
    return np.convolve(data_in, kernel, 'same')
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
    """ read in etopo5 topography/bathymetry. """
    etopo_path = '/Users/bell/Data_Local/MapGrids/etopo5.nc'
    etopodata = Dataset(etopo_path)

    topoin = etopodata.variables['bath'][:]
    lons = etopodata.variables['X'][:]
    lats = etopodata.variables['Y'][:]
    etopodata.close()

    topoin, lons = shiftgrid(0., topoin, lons, start=False)  # -360 -> 0
    lons, lats = np.meshgrid(lons, lats)
    return (topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.2003.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = ['Chiniak Trough','Chiniak Trough']
sta_lat = [57.33333,57.33333]
sta_long = [151.33333,151.33333]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
globec_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
gorept_pt = sphered.nearest_point([sta_lat[1],-1 * sta_long[1]],lat_lon['lat'],lat_lon['lon'], '2d')
globec_modelpt = [lat_lon['lat'][globec_pt[3],globec_pt[4]],lat_lon['lon'][globec_pt[3],globec_pt[4]]]
gorept_modelpt = [lat_lon['lat'][gorept_pt[3],gorept_pt[4]],lat_lon['lon'][gorept_pt[3],gorept_pt[4]]]
print "Globec nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], globec_modelpt[0], globec_modelpt[1])
print "GorePt nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[1], sta_long[1], gorept_modelpt[0], gorept_modelpt[1])
#loop over all requested data
#years = arange(1984,2014,1)
#years = [1984, 1987, 1989, 1991, 1994, 2001, 2002, 2003, 2004, 2005, 2006, 2011, 2013]
years = [2001,2002]
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3u_f = triangle_smoothing(globec3_data['uwnd'])
goreptu_f = triangle_smoothing(gorept_data['uwnd'])
globec3u = globec3_data['uwnd']
goreptu = gorept_data['uwnd']
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3v_f = triangle_smoothing(globec3_data['vwnd'])
goreptv_f = triangle_smoothing(gorept_data['vwnd'])
globec3v = globec3_data['vwnd']
goreptv = gorept_data['vwnd']
#rotate to shore (Along/Across)
NARR_wind_mag = np.sqrt(globec3u**2. + globec3v**2.)
NARR_wind_dir_math = np.rad2deg(np.arctan2(globec3v, globec3u))
(NARRalong, NARRcross) = rotate_coord(135., NARR_wind_mag, NARR_wind_dir_math)
#convert to EPIC time
pydate = date2pydate(globec3_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_5720N15120W_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[1], [epic_time, epic_time1], globec_modelpt, [globec3u_f, globec3v_f])
outfile = 'data/NARR_5720N15120W_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], gorept_modelpt, [goreptu_f, goreptv_f])
output2screen = False
if output2screen:
print"Date/Time, Across (m/s), Along(m/s)\n"
for i,v in enumerate(pydate):
print "{0}, {1}, {2}".format(pythondate2str(v), NARRcross[i],NARRalong[i])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0], -1. * sta_long[1]],sta_lat)
x_close, y_close = m([globec_modelpt[1],gorept_modelpt[1]], [globec_modelpt[0],gorept_modelpt[0]])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyle='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/ChiniakTrough_region.png', bbox_inches='tight', dpi = (100))
plt.close()
| |
import abc
import os
from hashlib import md5, sha1
import hkdf
from Crypto.Cipher import AES, ARC4, ChaCha20, ChaCha20_Poly1305, Salsa20
class BaseCipher:
    """Common base: derives the master key from a password."""

    def __init__(self, password: str):
        self.master_key = self._get_key(password.encode("ascii", "ignore"))

    def _get_key(self, password: bytes, salt: bytes = b"") -> bytes:
        # EVP_BytesToKey-style derivation: chain MD5 digests of
        # (previous digest + password + salt) until enough bytes exist.
        chunks = []
        derived = 0
        prev = b""
        while derived < self.KEY_SIZE:
            prev = md5(prev + password + salt).digest()
            chunks.append(prev)
            derived += len(prev)
        return b"".join(chunks)[: self.KEY_SIZE]
class AEADCipher(BaseCipher, metaclass=abc.ABCMeta):
    """AEAD cipher base: per-salt subkey plus counter-derived nonces."""
    info = b"ss-subkey"  # HKDF info string used for subkey derivation
    is_stream_cipher = False
    PACKET_LIMIT = 0x3FFF  # max plaintext bytes sealed per AEAD chunk

    @property
    @abc.abstractmethod
    def KEY_SIZE(self):
        ""

    @property
    @abc.abstractmethod
    def SALT_SIZE(self):
        ""

    @property
    @abc.abstractmethod
    def NONCE_SIZE(self):
        ""

    @property
    @abc.abstractmethod
    def TAG_SIZE(self):
        ""

    def _derive_subkey(self, salt: bytes) -> bytes:
        # Session subkey = HKDF-SHA1(master_key, salt, info), KEY_SIZE bytes.
        return hkdf.Hkdf(salt, self.master_key, sha1).expand(self.info, self.KEY_SIZE)

    def random_salt(self) -> bytes:
        return os.urandom(self.SALT_SIZE)

    def make_encrypter(self, salt: bytes = None) -> (bytes, bytes):
        """Return (salt, encrypt) where encrypt AEAD-seals one chunk per call.

        The nonce is a little-endian packet counter, so each (subkey, nonce)
        pair is used at most once.
        """
        counter = 0
        salt = salt if salt is not None else self.random_salt()
        subkey = self._derive_subkey(salt)
        def encrypt(plaintext: bytes) -> bytes:
            nonlocal counter
            nonce = counter.to_bytes(self.NONCE_SIZE, "little")
            counter += 1
            # A fresh cipher object per chunk (nonce changes every call).
            encrypter = self.new_cipher(subkey, nonce)
            if len(plaintext) <= self.PACKET_LIMIT:
                return encrypter.encrypt_and_digest(plaintext)
            else:
                # NOTE(review): encrypt_and_digest returns a (ciphertext, tag)
                # tuple, so this concatenation yields a flat tuple of
                # alternating ciphertext/tag pieces for oversized input —
                # confirm callers expect that shape.
                with memoryview(plaintext) as data:
                    return encrypter.encrypt_and_digest(
                        data[: self.PACKET_LIMIT]
                    ) + encrypt(data[self.PACKET_LIMIT :])
        return salt, encrypt

    def make_decrypter(self, salt: bytes):
        """Return a decrypt(ciphertext, tag) function bound to this salt."""
        counter = 0
        subkey = self._derive_subkey(salt)
        def decrypt(ciphertext: bytes, tag: bytes) -> bytes:
            nonlocal counter
            nonce = counter.to_bytes(self.NONCE_SIZE, "little")
            counter += 1
            decrypter = self.new_cipher(subkey, nonce)
            # Raises if the authentication tag does not verify.
            return decrypter.decrypt_and_verify(ciphertext, tag)
        return decrypt

    @abc.abstractmethod
    def new_cipher(self, subkey: bytes, nonce: bytes):
        ""
class AES128GCM(AEADCipher):
    """AEAD cipher: AES-128 in GCM mode."""
    KEY_SIZE = 16
    SALT_SIZE = 16
    NONCE_SIZE = 12
    TAG_SIZE = 16
    def new_cipher(self, subkey: bytes, nonce: bytes):
        """Return a fresh one-shot AES-GCM cipher object."""
        return AES.new(subkey, AES.MODE_GCM, nonce=nonce, mac_len=self.TAG_SIZE)
class AES192GCM(AES128GCM):
    """AEAD cipher: AES-192-GCM (reuses AES128GCM.new_cipher)."""
    KEY_SIZE = 24
    SALT_SIZE = 24
    NONCE_SIZE = 12
    TAG_SIZE = 16
class AES256GCM(AES128GCM):
    """AEAD cipher: AES-256-GCM (reuses AES128GCM.new_cipher)."""
    KEY_SIZE = 32
    SALT_SIZE = 32
    NONCE_SIZE = 12
    TAG_SIZE = 16
class ChaCha20IETFPoly1305(AEADCipher):
    """AEAD cipher: ChaCha20-Poly1305 with the 12-byte IETF nonce."""
    KEY_SIZE = 32
    SALT_SIZE = 32
    NONCE_SIZE = 12
    TAG_SIZE = 16
    def new_cipher(self, subkey: bytes, nonce: bytes):
        """Return a fresh one-shot ChaCha20-Poly1305 cipher object."""
        return ChaCha20_Poly1305.new(key=subkey, nonce=nonce)
class StreamCipher(BaseCipher, metaclass=abc.ABCMeta):
    """Base for classic stream ciphers keyed directly by master_key + IV."""

    is_stream_cipher = True

    @property
    @abc.abstractmethod
    def KEY_SIZE(self):
        """Key size in bytes."""

    @property
    @abc.abstractmethod
    def IV_SIZE(self):
        """Initialization-vector size in bytes."""

    def random_iv(self):
        """Return a fresh random IV of IV_SIZE bytes."""
        return os.urandom(self.IV_SIZE)

    def make_encrypter(self, iv: bytes = None):
        """Return ``(iv, encrypt)``; *encrypt* processes successive chunks."""
        if iv is None:
            iv = self.random_iv()
        # The cipher object carries the running keystream state, so the
        # bound method behaves exactly like the old closure did.
        return iv, self.new_cipher(self.master_key, iv).encrypt

    def make_decrypter(self, iv):
        """Return a ``decrypt(ciphertext)`` callable for the stream at *iv*."""
        return self.new_cipher(self.master_key, iv).decrypt

    @abc.abstractmethod
    def new_cipher(self, key: bytes, iv: bytes):
        """Return a stateful stream cipher object for *key*/*iv*."""
class AES256CFB(StreamCipher):
    """Stream cipher: AES-256 in CFB mode."""
    KEY_SIZE = 32
    IV_SIZE = 16
    def new_cipher(self, key: bytes, iv: bytes):
        # segment_size=128 selects CFB128 (full-block feedback).
        return AES.new(key, mode=AES.MODE_CFB, iv=iv, segment_size=128)
class AES128CFB(AES256CFB):
    """Stream cipher: AES-128-CFB (reuses AES256CFB.new_cipher)."""
    KEY_SIZE = 16
class AES192CFB(AES256CFB):
    """Stream cipher: AES-192-CFB (reuses AES256CFB.new_cipher)."""
    KEY_SIZE = 24
class ChaCha20Cipher(StreamCipher):
    """Stream cipher: ChaCha20 with the original 8-byte (DJB) nonce."""
    KEY_SIZE = 32
    IV_SIZE = 8
    def new_cipher(self, key: bytes, iv: bytes):
        return ChaCha20.new(key=key, nonce=iv)
class Salsa20Cipher(StreamCipher):
    """Stream cipher: Salsa20 with an 8-byte nonce."""
    KEY_SIZE = 32
    IV_SIZE = 8
    def new_cipher(self, key: bytes, iv: bytes):
        return Salsa20.new(key=key, nonce=iv)
class RC4Cipher(StreamCipher):
    """Stream cipher: RC4 (no IV; keystream depends only on the key).

    NOTE(review): RC4 is cryptographically broken and should be kept
    only for compatibility with peers that still require it.
    """
    KEY_SIZE = 16
    IV_SIZE = 0
    def new_cipher(self, key: bytes, iv: bytes):
        # The iv parameter is accepted for interface uniformity but unused.
        return ARC4.new(key=key)
# Registry mapping wire-format cipher names to their implementations.
ciphers = {
    "aes-256-cfb": AES256CFB,
    "aes-128-cfb": AES128CFB,
    "aes-192-cfb": AES192CFB,
    "chacha20": ChaCha20Cipher,
    "salsa20": Salsa20Cipher,
    "rc4": RC4Cipher,
    "aes-256-gcm": AES256GCM,
    "aes-192-gcm": AES192GCM,
    "aes-128-gcm": AES128GCM,
    "chacha20-ietf-poly1305": ChaCha20IETFPoly1305,
}
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
from six.moves import BaseHTTPServer
from six.moves import urllib
from six.moves import input
from oauth2client import client
from oauth2client import util
# Template shown when the client_secrets.json file is missing; the '%s'
# placeholder receives the expected path to the file.
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.  It is None when argparse could not be imported.
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.
    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Class-level default; ClientRedirectHandler.do_GET replaces it on the
    # instance with the parsed query string of the redirect request.
    query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.
    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """
    def do_GET(self):
        """Handle a GET request.
        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        # Always answer 200: success or failure is carried in the query
        # string, not the HTTP status.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # Everything after the first '?' is the OAuth redirect payload
        # ('code=...' on success, 'error=...' on rejection).
        query = self.path.split('?', 1)[-1]
        query = dict(urllib.parse.parse_qsl(query))
        # Stash the parameters where run_flow() can read them.
        self.server.query_params = query
        self.wfile.write(b"<html><head><title>Authentication Status</title></head>")
        self.wfile.write(b"<body><p>The authentication flow has completed.</p>")
        self.wfile.write(b"</body></html>")
    def log_message(self, format, *args):
        """Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
    """Core code for a command-line application.
    The ``run()`` function is called from your application and runs
    through all the steps to obtain credentials. It takes a ``Flow``
    argument and attempts to open an authorization server page in the
    user's default web browser. The server asks the user to grant your
    application access to the user's data. If the user grants access,
    the ``run()`` function returns new credentials. The new credentials
    are also stored in the ``storage`` argument, which updates the file
    associated with the ``Storage`` object.
    It presumes it is run from a command-line application and supports the
    following flags:
    ``--auth_host_name`` (string, default: ``localhost``)
    Host name to use when running a local web server to handle
    redirects during OAuth authorization.
    ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
    Port to use when running a local web server to handle redirects
    during OAuth authorization. Repeat this option to specify a list
    of values.
    ``--[no]auth_local_webserver`` (boolean, default: ``True``)
    Run a local web server to handle redirects during OAuth authorization.
    The tools module defines an ``ArgumentParser`` the already contains the flag
    definitions that ``run()`` requires. You can pass that ``ArgumentParser`` to your
    ``ArgumentParser`` constructor::
    parser = argparse.ArgumentParser(description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[tools.argparser])
    flags = parser.parse_args(argv)
    Args:
    flow: Flow, an OAuth 2.0 Flow to step through.
    storage: Storage, a ``Storage`` to store the credential in.
    flags: ``argparse.Namespace``, The command-line flags. This is the
    object returned from calling ``parse_args()`` on
    ``argparse.ArgumentParser`` as described above.
    http: An instance of ``httplib2.Http.request`` or something that
    acts like it.
    Returns:
    Credentials, the obtained credential.
    """
    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
    if not flags.noauth_local_webserver:
        # Try each candidate port until the local redirect server binds.
        success = False
        port_number = 0
        for port in flags.auth_host_port:
            port_number = port
            try:
                httpd = ClientRedirectServer((flags.auth_host_name, port),
                                             ClientRedirectHandler)
            except socket.error:
                pass
            else:
                success = True
                break
        flags.noauth_local_webserver = not success
        if not success:
            # BUG FIX: the second default port is 8090 (see the
            # --auth_host_port default above), not 9090 as the old
            # message claimed.
            print('Failed to start a local webserver listening on either port 8080')
            print('or port 8090. Please check your firewall settings and locally')
            print('running programs that may be blocking or using those ports.')
            print()
            print('Falling back to --noauth_local_webserver and continuing with')
            print('authorization.')
            print()
    if not flags.noauth_local_webserver:
        oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
    else:
        # Out-of-band flow: the user will paste the code manually.
        oauth_callback = client.OOB_CALLBACK_URN
    flow.redirect_uri = oauth_callback
    authorize_url = flow.step1_get_authorize_url()
    if not flags.noauth_local_webserver:
        import webbrowser
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print('Your browser has been opened to visit:')
        print()
        print(' ' + authorize_url)
        print()
        print('If your browser is on a different machine then exit and re-run this')
        print('application with the command-line parameter ')
        print()
        print(' --noauth_local_webserver')
        print()
    else:
        print('Go to the following link in your browser:')
        print()
        print(' ' + authorize_url)
        print()
    code = None
    if not flags.noauth_local_webserver:
        # Block until the browser hits our redirect handler exactly once.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        else:
            print('Failed to find "code" in the query parameters of the redirect.')
            sys.exit('Try running with --noauth_local_webserver.')
    else:
        code = input('Enter verification code: ').strip()
    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        sys.exit('Authentication has failed: %s' % e)
    storage.put(credential)
    credential.set_store(storage)
    print('Authentication successful.')
    return credential
def message_if_missing(filename):
    """Helpful message to display if the CLIENT_SECRETS file is missing."""
    # Interpolate the expected path into the module-level warning template.
    return _CLIENT_SECRETS_MESSAGE % (filename,)
# Provide the legacy gflags-based run() API when the optional old_run
# module is importable; otherwise install a stub that points callers at
# run_flow() instead.
try:
    from oauth2client.old_run import run
    from oauth2client.old_run import FLAGS
except ImportError:
    def run(*args, **kwargs):
        """Stub for the gflags-based run() API; gflags is unavailable."""
        # BUG FIX: corrected the spelling of "preferably" in this
        # user-facing error message.
        raise NotImplementedError(
            'The gflags library must be installed to use tools.run(). '
            'Please install gflags or preferably switch to using '
            'tools.run_flow().')
| |
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.template.defaultfilters import date
from django.utils import six
from django.utils.html import (conditional_escape, escape, format_html,
format_html_join)
from django.utils.six.moves import reduce
from django.utils.translation import ugettext_lazy as _, ugettext
from djblets.datagrid.grids import CheckboxColumn, Column, DateTimeColumn
from djblets.gravatars import get_gravatar_url
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.models import Profile, ReviewRequestVisit
from reviewboard.reviews.models import ReviewRequest
from reviewboard.reviews.templatetags.reviewtags import render_star
from reviewboard.site.urlresolvers import local_site_reverse
class BaseStarColumn(Column):
    """Indicates if an item is starred.
    This is the base class for all columns that deal with starring items.
    The star is interactive, allowing the user to star or unstar the item.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(BaseStarColumn, self).__init__(
            image_class='rb-icon rb-icon-star-on',
            image_alt=_('Starred'),
            detailed_label=_('Starred'),
            shrink=True,
            *args, **kwargs)
    def setup_state(self, state):
        """Set up the state for this column."""
        # Subclasses' augment_queryset() populate this with the PKs of
        # the items starred by the current user.
        state.all_starred = set()
    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # render_star() reads obj.starred, so annotate the object first.
        obj.starred = obj.pk in state.all_starred
        return render_star(state.datagrid.request.user, obj)
class BaseSubmitterColumn(Column):
    """Base class for the Submitter column.
    We have two versions of this column: One for review request datagrids,
    and one for review datagrids. This columns contains all the common
    rendering logic between the two.
    """
    # Pixel size requested for the rendered gravatar image.
    GRAVATAR_SIZE = 24
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(BaseSubmitterColumn, self).__init__(
            label=_('Submitter'),
            field_name='review_request',
            css_class='submitter-column',
            shrink=True,
            sortable=True,
            link=True,
            *args, **kwargs)
    def render_user(self, state, user):
        """Render the user's name and gravatar as HTML."""
        # Gravatars are shown only when the site-wide setting enables them.
        siteconfig = SiteConfiguration.objects.get_current()
        if siteconfig.get('integration_gravatars'):
            gravatar_url = get_gravatar_url(state.datagrid.request, user,
                                            self.GRAVATAR_SIZE)
        else:
            gravatar_url = None
        if gravatar_url:
            gravatar_html = format_html(
                '<img src="{0}" width="{1}" height="{1}" alt="{2}" '
                'class="gravatar" /> ',
                gravatar_url, self.GRAVATAR_SIZE, user.username)
        else:
            gravatar_html = ''
        return format_html(
            '<a class="user" href="{0}">{1}{2}</a>',
            user.get_absolute_url(), gravatar_html, user.username)
class BugsColumn(Column):
    """Shows the list of bugs specified on a review request.

    The list of bugs will be linked to the bug tracker, if a bug tracker
    was configured for the repository the review request's change is on.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(BugsColumn, self).__init__(
            label=_('Bugs'),
            css_class='bugs',
            link=False,
            shrink=True,
            sortable=False,
            *args, **kwargs)

    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.select_related('repository')

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        bugs = review_request.get_bug_list()
        repository = review_request.repository

        if not (repository and repository.bug_tracker):
            # No bug tracker is configured; show the plain bug IDs.
            return ', '.join(bugs)

        local_site = review_request.local_site
        local_site_name = local_site.name if local_site else None

        rendered = []

        for bug in bugs:
            try:
                bug_url = local_site_reverse(
                    'bug_url',
                    local_site_name=local_site_name,
                    args=(review_request.display_id, bug))
                rendered.append(
                    format_html('<a href="{0}">{1}</a>', bug_url, bug))
            except NoReverseMatch:
                # Fall back to plain (escaped) text for this bug.
                rendered.append(escape(bug))

        return ', '.join(rendered)
class ReviewRequestCheckboxColumn(CheckboxColumn):
    """A column containing a check-box."""

    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        if not self.is_selectable(state, obj):
            return ''

        checked = 'checked="true"' if self.is_selected(state, obj) else ''

        return ('<input type="checkbox" data-object-id="%s" '
                'data-checkbox-name="%s" %s />'
                % (obj.display_id, escape(self.checkbox_name), checked))
class DateTimeSinceColumn(DateTimeColumn):
    """Displays how long it has been since a given date/time.
    These columns will dynamically update as the page is shown, so that the
    number of minutes, hours, days, etc. ago is correct.
    """
    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # The machine-readable datetime attribute ('c' = ISO 8601 format)
        # lets client-side code keep the relative label current.
        return '<time class="timesince" datetime="%s">%s</time>' % (
            date(getattr(obj, self.field_name), 'c'),
            super(DateTimeSinceColumn, self).render_data(state, obj))
class DiffUpdatedColumn(DateTimeColumn):
    """Shows the date/time that the diff was last updated."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(DiffUpdatedColumn, self).__init__(
            label=_('Diff Updated'),
            db_field='diffset_history__last_diff_updated',
            field_name='last_diff_updated',
            sortable=True,
            link=False,
            *args, **kwargs)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.select_related('diffset_history')
    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # field_name lives on obj.diffset_history, not on obj itself, so
        # the parent renderer is handed the history object.  Show nothing
        # when no diff was ever uploaded.
        if obj.diffset_history.last_diff_updated:
            return super(DiffUpdatedColumn, self).render_data(
                state, obj.diffset_history)
        else:
            return ''
class DiffUpdatedSinceColumn(DateTimeSinceColumn):
    """Shows the elapsed time since the diff was last updated."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(DiffUpdatedSinceColumn, self).__init__(
            label=_('Diff Updated'),
            db_field='diffset_history__last_diff_updated',
            field_name='last_diff_updated',
            sortable=True,
            link=False,
            *args, **kwargs)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.select_related('diffset_history')
    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # Same pattern as DiffUpdatedColumn: the timestamp is read off
        # obj.diffset_history, and nothing is shown when no diff exists.
        if obj.diffset_history.last_diff_updated:
            return super(DiffUpdatedSinceColumn, self).render_data(
                state, obj.diffset_history)
        else:
            return ''
class GroupMemberCountColumn(Column):
    """Shows the number of users that are part of a review group."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(GroupMemberCountColumn, self).__init__(
            link=True,
            link_func=self.link_to_object,
            *args, **kwargs)
    def render_data(self, state, group):
        """Return the rendered contents of the column."""
        # NOTE(review): this issues one COUNT query per rendered row.
        return six.text_type(group.users.count())
    def link_to_object(self, state, group, value):
        """Return the link to the object in the column."""
        return local_site_reverse('group-members',
                                  request=state.datagrid.request,
                                  args=[group.name])
class GroupsColumn(Column):
    """Shows the list of groups requested to review the review request."""

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(GroupsColumn, self).__init__(
            label=_('Groups'),
            detailed_label=_('Target Groups'),
            sortable=False,
            shrink=False,
            *args, **kwargs)

    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.prefetch_related('target_groups')

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        # Each group name is followed by a trailing space, matching the
        # old reduce()-based concatenation exactly.
        return ''.join(
            group.name + ' '
            for group in review_request.target_groups.all())
class MyCommentsColumn(Column):
    """Shows if the current user has reviewed the review request."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(MyCommentsColumn, self).__init__(
            image_class='rb-icon rb-icon-datagrid-comment-draft',
            image_alt=_('My Comments'),
            detailed_label=_('My Comments'),
            shrink=True,
            *args, **kwargs)
    # XXX It'd be nice to be able to sort on this, but datagrids currently
    # can only sort based on stored (in the DB) values, not computed
    # values.
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        user = state.datagrid.request.user
        if user.is_anonymous():
            return queryset
        # user.id is interpolated into the SQL below; it is an integer
        # from the authenticated session, not user-controlled text.
        query_dict = {
            'user_id': six.text_type(user.id),
        }
        # Annotate each row with three subquery counts: all of my
        # reviews, my unpublished (draft) reviews, and my "Ship It!"
        # reviews on this review request.
        return queryset.extra(select={
            'mycomments_my_reviews': """
                SELECT COUNT(1)
                  FROM reviews_review
                  WHERE reviews_review.user_id = %(user_id)s
                    AND reviews_review.review_request_id =
                        reviews_reviewrequest.id
            """ % query_dict,
            'mycomments_private_reviews': """
                SELECT COUNT(1)
                  FROM reviews_review
                  WHERE reviews_review.user_id = %(user_id)s
                    AND reviews_review.review_request_id =
                        reviews_reviewrequest.id
                    AND NOT reviews_review.public
            """ % query_dict,
            'mycomments_shipit_reviews': """
                SELECT COUNT(1)
                  FROM reviews_review
                  WHERE reviews_review.user_id = %(user_id)s
                    AND reviews_review.review_request_id =
                        reviews_reviewrequest.id
                    AND reviews_review.ship_it
            """ % query_dict,
        })
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        user = state.datagrid.request.user
        if user.is_anonymous() or review_request.mycomments_my_reviews == 0:
            return ''
        # Priority is ranked in the following order:
        #
        # 1) Non-public (draft) reviews
        # 2) Public reviews marked "Ship It"
        # 3) Public reviews not marked "Ship It"
        if review_request.mycomments_private_reviews > 0:
            icon_class = 'rb-icon-datagrid-comment-draft'
            image_alt = _('Comments drafted')
        else:
            if review_request.mycomments_shipit_reviews > 0:
                icon_class = 'rb-icon-datagrid-comment-shipit'
                image_alt = _('Comments published. Ship it!')
            else:
                icon_class = 'rb-icon-datagrid-comment'
                image_alt = _('Comments published')
        return '<div class="rb-icon %s" title="%s"></div>' % \
            (icon_class, image_alt)
class NewUpdatesColumn(Column):
    """Indicates if there are new updates on a review request.
    This will show an icon if the review request has had any new updates
    or reviews since the user last saw it.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(NewUpdatesColumn, self).__init__(
            image_class='rb-icon rb-icon-datagrid-new-updates',
            image_alt=_('New Updates'),
            detailed_label=_('New Updates'),
            shrink=True,
            *args, **kwargs)
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        # new_review_count is presumably annotated onto the review
        # request by the datagrid's queryset -- TODO confirm.
        if review_request.new_review_count > 0:
            return '<div class="%s" title="%s" />' % \
                   (self.image_class, self.image_alt)
        return ''
class PendingCountColumn(Column):
    """Shows the pending number of review requests for a user or group.

    This will show the pending number of review requests for the given
    review group or user. It only applies to group or user lists.
    """

    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # field_name names the relation on the row object (user or group)
        # holding its review requests; 'P' filters to pending ones.
        relation = getattr(obj, self.field_name)
        pending_count = relation.filter(public=True, status='P').count()

        return six.text_type(pending_count)
class PeopleColumn(Column):
    """Shows the list of people requested to review the review request."""

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(PeopleColumn, self).__init__(
            label=_('People'),
            detailed_label=_('Target People'),
            sortable=False,
            shrink=False,
            *args, **kwargs)

    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.prefetch_related('target_people')

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        # Each username is followed by a trailing space, matching the
        # old reduce()-based concatenation exactly.
        return ''.join(
            person.username + ' '
            for person in review_request.target_people.all())
class RepositoryColumn(Column):
    """Shows the name of the repository the review request's change is on."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(RepositoryColumn, self).__init__(
            label=_('Repository'),
            db_field='repository__name',
            shrink=True,
            sortable=True,
            link=False,
            css_class='repository-column',
            *args, **kwargs)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        return queryset.select_related('repository')
    def render_data(self, state, obj):
        """Return the rendered contents of the column."""
        # A review request may have no repository; render an empty cell
        # rather than a falsy value like None.
        return super(RepositoryColumn, self).render_data(state, obj) or ''
class ReviewCountColumn(Column):
    """Shows the number of published reviews for a review request."""

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        # BUG FIX: this previously passed `*kwargs, **kwargs`, which
        # unpacked the keyword dict's keys as extra positional arguments
        # and silently dropped any caller-supplied positional arguments.
        super(ReviewCountColumn, self).__init__(
            label=_('Reviews'),
            detailed_label=_('Number of Reviews'),
            shrink=True,
            link=True,
            link_func=self.link_to_object,
            *args, **kwargs)

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        return six.text_type(review_request.publicreviewcount_count)

    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        # Annotate each row with its count of public, top-level
        # (non-reply) reviews.
        return queryset.extra(select={
            'publicreviewcount_count': """
                SELECT COUNT(*)
                  FROM reviews_review
                  WHERE reviews_review.public
                    AND reviews_review.base_reply_to_id is NULL
                    AND reviews_review.review_request_id =
                        reviews_reviewrequest.id
            """
        })

    def link_to_object(self, state, review_request, value):
        """Return the link to the object in the column."""
        return '%s#last-review' % review_request.get_absolute_url()
class ReviewGroupStarColumn(BaseStarColumn):
    """Indicates if a review group is starred.
    The star is interactive, allowing the user to star or unstar the group.
    """
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        user = state.datagrid.request.user
        if user.is_anonymous():
            return queryset
        try:
            profile = user.get_profile()
        except Profile.DoesNotExist:
            # No profile means no stars; leave state.all_starred empty.
            return queryset
        # Only fetch stars for the groups on the current page.
        state.all_starred = set(
            profile.starred_groups.filter(
                pk__in=state.datagrid.id_list).values_list('pk', flat=True))
        return queryset
class ReviewRequestIDColumn(Column):
    """Displays the ID of the review request."""

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(ReviewRequestIDColumn, self).__init__(
            label=_('ID'),
            detailed_label=_('Review Request ID'),
            shrink=True,
            link=True,
            sortable=True,
            *args, **kwargs)

    def get_sort_field(self, state):
        """Return the model field for sorting this column."""
        # Review requests on a Local Site sort by their per-site ID.
        return 'local_id' if state.datagrid.local_site else 'id'

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        return review_request.display_id
class ReviewRequestStarColumn(BaseStarColumn):
    """Indicates if a review request is starred.
    The star is interactive, allowing the user to star or unstar the
    review request.
    """
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        user = state.datagrid.request.user
        if user.is_anonymous():
            return queryset
        try:
            profile = user.get_profile()
        except Profile.DoesNotExist:
            # No profile means no stars; leave state.all_starred empty.
            return queryset
        # Only fetch stars for the review requests on the current page.
        state.all_starred = set(
            profile.starred_review_requests.filter(
                pk__in=state.datagrid.id_list).values_list('pk', flat=True))
        return queryset
class ReviewSubmitterColumn(BaseSubmitterColumn):
    """Shows the submitter of the review request for a review."""
    def render_data(self, state, review):
        """Return the rendered contents of the column."""
        return self.render_user(state, review.review_request.submitter)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        # NOTE(review): render_data() follows review.review_request
        # .submitter; select_related('reviews') does not obviously
        # prefetch that path -- verify against the model definitions.
        return queryset.select_related('reviews')
class ShipItColumn(Column):
    """Shows the "Ship It" count for a review request."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(ShipItColumn, self).__init__(
            image_class='rb-icon rb-icon-shipit',
            image_alt=_('Ship It!'),
            detailed_label=_('Ship It!'),
            db_field='shipit_count',
            sortable=True,
            shrink=True,
            *args, **kwargs)
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        # Open issues take precedence over "Ship It!"s in this cell.
        if review_request.issue_open_count > 0:
            return ('<span class="issue-count">'
                    ' <span class="issue-icon">!</span> %s'
                    '</span>'
                    % review_request.issue_open_count)
        elif review_request.shipit_count > 0:
            return '<span class="shipit-count">' \
                   ' <div class="rb-icon rb-icon-shipit-checkmark"' \
                   ' title="%s"></div> %s' \
                   '</span>' % \
                   (self.image_alt, review_request.shipit_count)
        else:
            return ''
class SubmitterColumn(BaseSubmitterColumn):
    """Shows the username of the user who submitted the review request."""
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(SubmitterColumn, self).__init__(
            db_field='submitter__username',
            *args, **kwargs)
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        return self.render_user(state, review_request.submitter)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        # Pull the submitter in the same query; render_user() reads
        # several of its fields.
        return queryset.select_related('submitter')
class SummaryColumn(Column):
    """Shows the summary of a review request.
    This will also prepend the draft/submitted/discarded state, if any,
    to the summary.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(SummaryColumn, self).__init__(
            label=_('Summary'),
            expand=True,
            link=True,
            css_class='summary',
            sortable=True,
            *args, **kwargs)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        user = state.datagrid.request.user
        if user.is_anonymous():
            return queryset
        # Annotate each row with the user's draft summary (if they own a
        # draft) and their visit visibility (archived/muted) state.
        # user.id is an integer from the session, not user-controlled
        # text.
        return queryset.extra(select={
            'draft_summary': """
                SELECT reviews_reviewrequestdraft.summary
                  FROM reviews_reviewrequestdraft
                  WHERE reviews_reviewrequestdraft.review_request_id =
                        reviews_reviewrequest.id
            """,
            'visibility': """
                SELECT accounts_reviewrequestvisit.visibility
                  FROM accounts_reviewrequestvisit
                  WHERE accounts_reviewrequestvisit.review_request_id =
                        reviews_reviewrequest.id
                    AND accounts_reviewrequestvisit.user_id = %(user_id)s
            """ % {
                'user_id': six.text_type(user.id)
            }
        })
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        summary = review_request.summary
        labels = []
        if review_request.submitter_id == state.datagrid.request.user.id:
            # Show the user's own draft summary in place of the public one.
            if review_request.draft_summary is not None:
                summary = review_request.draft_summary
                labels.append(('label-draft', _('Draft')))
            elif (not review_request.public and
                  review_request.status == ReviewRequest.PENDING_REVIEW):
                labels.append(('label-draft', _('Draft')))
        # review_request.visibility is not defined when the user is not
        # logged in.
        if state.datagrid.request.user.is_authenticated():
            if review_request.visibility == ReviewRequestVisit.ARCHIVED:
                labels.append(('label-archived', _('Archived')))
            elif review_request.visibility == ReviewRequestVisit.MUTED:
                labels.append(('label-muted', _('Muted')))
        if review_request.status == ReviewRequest.SUBMITTED:
            labels.append(('label-submitted', _('Submitted')))
        elif review_request.status == ReviewRequest.DISCARDED:
            labels.append(('label-discarded', _('Discarded')))
        display_data = format_html_join(
            '', '<label class="{}">{}</label>', labels)
        if summary:
            display_data += format_html('<span>{}</span>', summary)
        else:
            display_data += format_html('<span class="no-summary">{}</span>',
                                        _('No Summary'))
        return display_data
class ReviewSummaryColumn(SummaryColumn):
    """Shows the summary of the review request of a review.
    This does not (yet) prepend the draft/submitted/discarded state, if any,
    to the summary.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        # Calling super(SummaryColumn, ...) -- not
        # super(ReviewSummaryColumn, ...) -- intentionally skips
        # SummaryColumn.__init__, which would otherwise pass its own
        # label/sortable kwargs and clash with the ones supplied here.
        super(SummaryColumn, self).__init__(
            label=_('Review Request Summary'),
            expand=True,
            link=True,
            css_class='summary',
            *args, **kwargs)
    def render_data(self, state, review):
        """Return the rendered contents of the column."""
        return conditional_escape(review.review_request.summary)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        # NOTE(review): render_data() reads review.review_request.summary;
        # select_related('reviews') does not obviously prefetch that path
        # -- verify against the model definitions.
        return queryset.select_related('reviews')
class ToMeColumn(Column):
    """Indicates if the user is requested to review the change.
    This will show an indicator if the user is on the Target People reviewers
    list.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        # U+00BB is the right-pointing guillemet used as the column glyph.
        raquo = '\u00BB'
        super(ToMeColumn, self).__init__(
            label=raquo,
            detailed_label=_('To Me'),
            detailed_label_html=(ugettext('%s To Me') % raquo),
            shrink=True,
            *args, **kwargs)
    def augment_queryset(self, state, queryset):
        """Add additional queries to the queryset."""
        user = state.datagrid.request.user
        if user.is_authenticated():
            # PKs (on the current page) of review requests that target
            # this user directly.
            state.all_to_me = set(
                user.directed_review_requests.filter(
                    pk__in=state.datagrid.id_list).values_list('pk',
                                                               flat=True))
        else:
            state.all_to_me = set()
        return queryset
    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        if review_request.pk in state.all_to_me:
            return ('<div title="%s"><b>»</b></div>'
                    % (self.detailed_label))
        return ''
class DiffSizeColumn(Column):
    """Indicates line add/delete counts for the latest diffset."""

    def __init__(self, *args, **kwargs):
        """Initialize the column."""
        super(DiffSizeColumn, self).__init__(
            label=_('Diff Size'),
            sortable=False,
            shrink=True,
            *args, **kwargs)

    def render_data(self, state, review_request):
        """Return the rendered contents of the column."""
        try:
            diffset = review_request.diffset_history.diffsets.latest()
        except ObjectDoesNotExist:
            # No diffs have been uploaded; show an empty cell.
            return ''

        line_counts = diffset.get_total_line_counts()
        pieces = []

        # Render "+N" for inserts and "-N" for deletes, skipping zeroes.
        for css_class, sign, count in (
                ('insert', '+', line_counts['raw_insert_count']),
                ('delete', '-', line_counts['raw_delete_count'])):
            if count:
                pieces.append('<span class="diff-size-column %s">%s%d</span>'
                              % (css_class, sign, count))

        return ' '.join(pieces)
| |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import re
import os
import ast
import _ast
import textwrap
import CommonMark
from collections import OrderedDict
# Resolve the project layout relative to this script's own location
cur_dir = os.path.dirname(__file__)
project_dir = os.path.abspath(os.path.join(cur_dir, '..'))
docs_dir = os.path.join(project_dir, 'docs')
module_name = 'certvalidator'
# Maps a markdown document to a Python source file to look in for
# class/method/function docstrings
MD_SOURCE_MAP = {
    'docs/api.md': [
        'certvalidator/__init__.py',
        'certvalidator/context.py',
    ],
}
# A search/replace dictionary to modify docstring contents before generating
# markdown from them
definition_replacements = {}
# CommonMark removed DocParser as of 0.6.0, so its presence means the
# installed version is too old for the AST API this script relies on
if hasattr(CommonMark, 'DocParser'):
    raise EnvironmentError("CommonMark must be version 0.6.0 or newer")
def _get_func_info(docstring, def_lineno, code_lines, prefix):
    """
    Extracts the function signature and description of a Python function

    :param docstring:
        A unicode string of the docstring for the function, or None if the
        function has no docstring

    :param def_lineno:
        An integer line number that function was defined on

    :param code_lines:
        A list of unicode string lines from the source file the function was
        defined in

    :param prefix:
        A prefix to prepend to all output lines

    :return:
        A 2-element tuple:
         - [0] A unicode string of the function signature with a docstring of
           parameter info
         - [1] A markdown snippet of the function description
    """

    # ast.get_docstring() returns None for an undocumented function; treat
    # that like an empty docstring instead of crashing on .splitlines()
    if docstring is None:
        docstring = ''

    def_index = def_lineno - 1
    definition = code_lines[def_index]
    definition = definition.rstrip()
    # The signature may span multiple lines - keep appending source lines
    # until the closing colon is found
    while not definition.endswith(':'):
        def_index += 1
        definition += '\n' + code_lines[def_index].rstrip()
    definition = textwrap.dedent(definition).rstrip(':')
    definition = definition.replace('\n', '\n' + prefix)

    # Split the docstring into the free-form description (everything before
    # the first ":param ..."-style field) and the parameter info after it
    description = ''
    found_colon = False
    params = ''
    for line in docstring.splitlines():
        if line and line[0] == ':':
            found_colon = True
        if not found_colon:
            if description:
                description += '\n'
            description += line
        else:
            if params:
                params += '\n'
            params += line

    description = description.strip()
    description_md = ''
    if description:
        description_md = '%s%s' % (prefix, description.replace('\n', '\n' + prefix))
        # Collapse whitespace-only blockquote lines
        description_md = re.sub('\n>(\\s+)\n', '\n>\n', description_md)

    # Re-attach the parameter documentation as a docstring in the rendered
    # signature block
    params = params.strip()
    if params:
        definition += (':\n%s """\n%s ' % (prefix, prefix))
        definition += params.replace('\n', '\n%s ' % prefix)
        definition += ('\n%s """' % prefix)
    definition = re.sub('\n>(\\s+)\n', '\n>\n', definition)

    # Apply any user-configured text substitutions
    for search, replace in definition_replacements.items():
        definition = definition.replace(search, replace)

    return (definition, description_md)
def _find_sections(md_ast, sections, last, last_class, total_lines=None):
    """
    Walks through a CommonMark AST to find section headers that delineate
    content that should be updated by this script
    :param md_ast:
        The AST of the markdown document
    :param sections:
        A dict to store the start and end lines of a section. The key will be
        a two-element tuple of the section type ("class", "function",
        "method" or "attribute") and identifier. The values are a two-element
        tuple of the start and end line number in the markdown document of the
        section.
    :param last:
        A dict containing information about the last section header seen.
        Includes the keys "type_name", "identifier", "start_line".
    :param last_class:
        A unicode string of the name of the last class found - used when
        processing methods and attributes.
    :param total_lines:
        An integer of the total number of lines in the markdown document -
        used to work around a bug in the API of the Python port of CommonMark
    """
    # Iterate a node's descendants, skipping the node itself
    def child_walker(node):
        for child, entering in node.walker():
            if child == node:
                continue
            yield child, entering
    for child, entering in child_walker(md_ast):
        if child.t == 'heading':
            start_line = child.sourcepos[0][0]
            # A level-2 heading closes whatever section was in progress
            if child.level == 2:
                if last:
                    sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
                    last.clear()
            # Level 3 marks classes/functions, level 5 marks
            # methods/attributes; both must look like "`identifier` type"
            # (a code element followed by a text element)
            if child.level in set([3, 5]):
                heading_elements = []
                for heading_child, _ in child_walker(child):
                    heading_elements.append(heading_child)
                if len(heading_elements) != 2:
                    continue
                first = heading_elements[0]
                second = heading_elements[1]
                if first.t != 'code':
                    continue
                if second.t != 'text':
                    continue
                type_name = second.literal.strip()
                # Strip the call parens and any leading dot from the name
                identifier = first.literal.strip().replace('()', '').lstrip('.')
                # A new section header also closes the previous section
                if last:
                    sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
                    last.clear()
                if type_name == 'function':
                    if child.level != 3:
                        continue
                if type_name == 'class':
                    if child.level != 3:
                        continue
                    # Remember the class so following methods/attributes can
                    # be namespaced under it
                    last_class.append(identifier)
                if type_name in set(['method', 'attribute']):
                    if child.level != 5:
                        continue
                    identifier = last_class[-1] + '.' + identifier
                last.update({
                    'type_name': type_name,
                    'identifier': identifier,
                    'start_line': start_line,
                })
        elif child.t == 'block_quote':
            # Class member docs are nested inside block quotes - recurse
            find_sections(child, sections, last, last_class)
    # Close the final section at the end of the document; total_lines is
    # passed in explicitly because the AST does not expose it
    if last:
        sections[(last['type_name'], last['identifier'])] = (last['start_line'], total_lines)
# Module-level alias used for the recursive calls above
find_sections = _find_sections
def walk_ast(node, code_lines, sections, md_chunks):
    """
    A callback used to walk the Python AST looking for classes, functions,
    methods and attributes. Generates chunks of markdown markup to replace
    the existing content.
    :param node:
        An _ast module node object
    :param code_lines:
        A list of unicode strings - the source lines of the Python file
    :param sections:
        A dict of markdown document sections that need to be updated. The key
        will be a two-element tuple of the section type ("class", "function",
        "method" or "attribute") and identifier. The values are a two-element
        tuple of the start and end line number in the markdown document of the
        section.
    :param md_chunks:
        A dict with keys from the sections param and the values being a unicode
        string containing a chunk of markdown markup.
    """
    if isinstance(node, _ast.FunctionDef):
        key = ('function', node.name)
        # Only regenerate docs for functions the markdown actually documents
        if key not in sections:
            return
        docstring = ast.get_docstring(node)
        # Decorators shift node.lineno; skip past them to the "def" line
        def_lineno = node.lineno + len(node.decorator_list)
        definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> ')
        md_chunk = textwrap.dedent("""
            ### `%s()` function
            > ```python
            > %s
            > ```
            >
            %s
            """).strip() % (
            node.name,
            definition,
            description_md
        ) + "\n"
        md_chunks[key] = md_chunk
    elif isinstance(node, _ast.ClassDef):
        if ('class', node.name) not in sections:
            return
        for subnode in node.body:
            if isinstance(subnode, _ast.FunctionDef):
                node_id = node.name + '.' + subnode.name
                method_key = ('method', node_id)
                is_method = method_key in sections
                attribute_key = ('attribute', node_id)
                is_attribute = attribute_key in sections
                is_constructor = subnode.name == '__init__'
                if not is_constructor and not is_attribute and not is_method:
                    continue
                docstring = ast.get_docstring(subnode)
                def_lineno = subnode.lineno + len(subnode.decorator_list)
                # Undocumented members are skipped entirely
                if not docstring:
                    continue
                if is_method or is_constructor:
                    definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> > ')
                    # The constructor's docs are folded into the class section
                    if is_constructor:
                        key = ('class', node.name)
                        class_docstring = ast.get_docstring(node) or ''
                        class_description = textwrap.dedent(class_docstring).strip()
                        if class_description:
                            class_description_md = "> %s\n>" % (class_description.replace("\n", "\n> "))
                        else:
                            class_description_md = ''
                        md_chunk = textwrap.dedent("""
                            ### `%s()` class
                            %s
                            > ##### constructor
                            >
                            > > ```python
                            > > %s
                            > > ```
                            > >
                            %s
                            """).strip() % (
                            node.name,
                            class_description_md,
                            definition,
                            description_md
                        )
                        md_chunk = md_chunk.replace('\n\n\n', '\n\n')
                    else:
                        key = method_key
                        md_chunk = textwrap.dedent("""
                            >
                            > ##### `.%s()` method
                            >
                            > > ```python
                            > > %s
                            > > ```
                            > >
                            %s
                            """).strip() % (
                            subnode.name,
                            definition,
                            description_md
                        )
                        # Trim a trailing empty blockquote line, if present
                        if md_chunk[-5:] == '\n> >\n':
                            md_chunk = md_chunk[0:-5]
                else:
                    key = attribute_key
                    description = textwrap.dedent(docstring).strip()
                    description_md = "> > %s" % (description.replace("\n", "\n> > "))
                    md_chunk = textwrap.dedent("""
                        >
                        > ##### `.%s` attribute
                        >
                        %s
                        """).strip() % (
                        subnode.name,
                        description_md
                    )
                # Strip trailing whitespace from every generated line
                md_chunks[key] = re.sub('[ \\t]+\n', '\n', md_chunk.rstrip())
    elif isinstance(node, _ast.If):
        # Recurse into module-level "if" statements (e.g. version guards)
        for subast in node.body:
            walk_ast(subast, code_lines, sections, md_chunks)
        for subast in node.orelse:
            walk_ast(subast, code_lines, sections, md_chunks)
def run():
    """
    Looks through the docs/ dir and parses each markdown document, looking for
    sections to update from Python docstrings. Looks for section headers in
    the format:
    - ### `ClassName()` class
    - ##### `.method_name()` method
    - ##### `.attribute_name` attribute
    - ### `function_name()` function
    The markdown content following these section headers up until the next
    section header will be replaced by new markdown generated from the Python
    docstrings of the associated source files.
    By default maps docs/{name}.md to {modulename}/{name}.py. Allows for
    custom mapping via the MD_SOURCE_MAP variable.
    """
    print('Updating API docs...')
    md_files = []
    # Collect every markdown file under docs/
    for root, _, filenames in os.walk(docs_dir):
        for filename in filenames:
            if not filename.endswith('.md'):
                continue
            md_files.append(os.path.join(root, filename))
    parser = CommonMark.Parser()
    for md_file in md_files:
        md_file_relative = md_file[len(project_dir) + 1:]
        if md_file_relative in MD_SOURCE_MAP:
            py_files = MD_SOURCE_MAP[md_file_relative]
            py_paths = [os.path.join(project_dir, py_file) for py_file in py_files]
        else:
            # Default mapping: docs/{name}.md -> {module_name}/{name}.py;
            # documents with no matching source file are left untouched
            py_files = [os.path.basename(md_file).replace('.md', '.py')]
            py_paths = [os.path.join(project_dir, module_name, py_files[0])]
            if not os.path.exists(py_paths[0]):
                continue
        with open(md_file, 'rb') as f:
            markdown = f.read().decode('utf-8')
        original_markdown = markdown
        md_lines = list(markdown.splitlines())
        md_ast = parser.parse(markdown)
        last_class = []
        last = {}
        sections = OrderedDict()
        # total line count is passed explicitly (see _find_sections)
        find_sections(md_ast, sections, last, last_class, markdown.count("\n") + 1)
        md_chunks = {}
        # Generate replacement markdown chunks from the Python sources
        for index, py_file in enumerate(py_files):
            py_path = py_paths[index]
            with open(os.path.join(py_path), 'rb') as f:
                code = f.read().decode('utf-8')
                module_ast = ast.parse(code, filename=py_file)
                code_lines = list(code.splitlines())
            for node in ast.iter_child_nodes(module_ast):
                walk_ast(node, code_lines, sections, md_chunks)
        added_lines = 0
        # Replaces one section in md_lines, returning the running offset of
        # lines added/removed so later sections' positions stay correct
        def _replace_md(key, sections, md_chunk, md_lines, added_lines):
            start, end = sections[key]
            start -= 1
            start += added_lines
            end += added_lines
            new_lines = md_chunk.split('\n')
            added_lines += len(new_lines) - (end - start)
            # Ensure a newline above each class header
            if start > 0 and md_lines[start][0:4] == '### ' and md_lines[start - 1][0:1] == '>':
                added_lines += 1
                new_lines.insert(0, '')
            md_lines[start:end] = new_lines
            return added_lines
        for key in sections:
            if key not in md_chunks:
                raise ValueError('No documentation found for %s' % key[1])
            added_lines = _replace_md(key, sections, md_chunks[key], md_lines, added_lines)
        markdown = '\n'.join(md_lines).strip() + '\n'
        # Only rewrite the file when the contents actually changed
        if original_markdown != markdown:
            with open(md_file, 'wb') as f:
                f.write(markdown.encode('utf-8'))
# Script entry point
if __name__ == '__main__':
    run()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/pylibc.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import ctypes
import ctypes.util
from king_phisher import constants
# ctypes equivalents of the POSIX gid_t / uid_t types
# NOTE(review): assumes 32-bit unsigned gid_t/uid_t (true for glibc on
# Linux) - confirm before using on other platforms
_c_gid_t = ctypes.c_uint32
_c_uid_t = ctypes.c_uint32
def _cstr(c_string, encoding='utf-8'):
if c_string is None:
return None
if isinstance(c_string, bytes):
return c_string.decode(encoding)
if isinstance(c_string, str):
return c_string
raise TypeError('c_string must be None or bytes or instance')
def _cbytes(c_bytes, encoding='utf-8'):
if isinstance(c_bytes, bytes):
return c_bytes
if isinstance(c_bytes, str):
return c_bytes.encode(encoding)
raise TypeError('c_bytes must be None, bytes or str instance')
# Native-Python view of a group database entry; field names mirror
# `struct group` from getgrnam(3) and the standard grp module
_GroupTuple = collections.namedtuple(
    '_GroupTuple',
    ('gr_name', 'gr_passwd', 'gr_gid', 'gr_mem')
)
class _GROUP(ctypes.Structure):
    """ctypes mirror of `struct group` as used by getgrnam(3)."""
    _fields_ = (('gr_name', ctypes.c_char_p),
                ('gr_passwd', ctypes.c_char_p),
                ('gr_gid', _c_gid_t),
                ('gr_mem', ctypes.POINTER(ctypes.c_char_p)))

    def to_tuple(self, encoding='utf-8'):
        """Convert this C structure into a native :py:class:`_GroupTuple`."""
        # gr_mem points to a NULL-terminated array of C strings; walk it by
        # index until the NULL sentinel is reached
        members = collections.deque()
        index = 0
        while self.gr_mem[index] is not None:
            members.append(_cstr(self.gr_mem[index], encoding=encoding))
            index += 1
        return _GroupTuple(
            gr_name=_cstr(self.gr_name, encoding=encoding),
            gr_passwd=_cstr(self.gr_passwd, encoding=encoding),
            gr_gid=self.gr_gid,
            gr_mem=members
        )
# Native-Python view of a password database entry; field names mirror
# `struct passwd` from getpwnam(3) and the standard pwd module
_PasswdTuple = collections.namedtuple(
    '_PasswdTuple',
    ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos', 'pw_dir', 'pw_shell')
)
class _PASSWD(ctypes.Structure):
    """ctypes mirror of `struct passwd` as used by getpwnam(3)."""
    _fields_ = (('pw_name', ctypes.c_char_p),
                ('pw_passwd', ctypes.c_char_p),
                ('pw_uid', _c_uid_t),
                ('pw_gid', _c_gid_t),
                ('pw_gecos', ctypes.c_char_p),
                ('pw_dir', ctypes.c_char_p),
                ('pw_shell', ctypes.c_char_p))

    def to_tuple(self, encoding='utf-8'):
        """Convert this C structure into a native :py:class:`_PasswdTuple`."""
        def decode(value):
            return _cstr(value, encoding=encoding)
        return _PasswdTuple(
            pw_name=decode(self.pw_name),
            pw_passwd=decode(self.pw_passwd),
            pw_uid=self.pw_uid,
            pw_gid=self.pw_gid,
            pw_gecos=decode(self.pw_gecos),
            pw_dir=decode(self.pw_dir),
            pw_shell=decode(self.pw_shell)
        )
# Load the C runtime. find_library() takes the library name WITHOUT the
# "lib" prefix, so the C library must be looked up as 'c'; the previous
# lookup of 'libc' searched for "liblibc" and returned None, which only
# worked by the dlopen(NULL) fallback behavior on Linux.
_libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))

# Declare argument and return types for the libc functions used below so
# ctypes converts values correctly instead of defaulting everything to int.
_libc_getgrnam = _libc.getgrnam
_libc_getgrnam.argtypes = [ctypes.c_char_p]
_libc_getgrnam.restype = ctypes.POINTER(_GROUP)

_libc_getgrouplist = _libc.getgrouplist
_libc_getgrouplist.argtypes = [ctypes.c_char_p, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_int)]
_libc_getgrouplist.restype = ctypes.c_int32

_libc_getpwnam = _libc.getpwnam
_libc_getpwnam.argtypes = [ctypes.c_char_p]
_libc_getpwnam.restype = ctypes.POINTER(_PASSWD)

_libc_getpwuid = _libc.getpwuid
_libc_getpwuid.argtypes = [_c_uid_t]
_libc_getpwuid.restype = ctypes.POINTER(_PASSWD)
def getgrnam(name, encoding='utf-8'):
    """
    Get the structure containing the fields from the specified entry in the
    group database. See
    `getgrnam(3) <http://man7.org/linux/man-pages/man3/getgrnam.3.html>`_ for
    more information.

    :param str name: The group name to look up.
    :param str encoding: The encoding to use for strings.
    :return: The entry from the group database or ``None`` if it was not found.
    :rtype: tuple
    """
    name = _cbytes(name, encoding=encoding)
    c_pgroup = _libc_getgrnam(name)
    if not c_pgroup:
        # NULL pointer - no such group
        return None
    # pass the caller's encoding through; previously the default encoding was
    # always used when decoding the result fields
    return c_pgroup.contents.to_tuple(encoding=encoding)
def getgrouplist(user, group=constants.AUTOMATIC, encoding='utf-8'):
    """
    Get the groups that the specified user belongs to. If *group* is not
    specified, it will be looked up from the password record for *user*. See
    `getgrouplist(3) <http://man7.org/linux/man-pages/man3/getgrouplist.3.html>`_
    for more information.
    :param str user: The user name to look up.
    :param int group: An optional group to add to the returned groups.
    :param str encoding: The encoding to use for strings.
    :return: The group IDs that *user* belongs to.
    :rtype: tuple
    """
    user = _cbytes(user, encoding=encoding)
    # Start with a guess of 20 groups; getgrouplist(3) reports the real
    # count through ngrouplist if the buffer turns out to be too small
    ngroups = 20
    ngrouplist = ctypes.c_int(ngroups)
    if group is constants.AUTOMATIC:
        # Default to the primary group from the user's passwd entry
        # NOTE(review): assumes *user* exists; getpwnam() returns None for an
        # unknown user, which would raise AttributeError here - confirm
        group = getpwnam(user).pw_gid
    elif not isinstance(group, int):
        raise TypeError('group must be AUTOMATIC or an integer')
    grouplist = (ctypes.c_uint * ngroups)()
    ct = _libc_getgrouplist(user, group, ctypes.cast(ctypes.byref(grouplist), ctypes.POINTER(ctypes.c_uint)), ctypes.byref(ngrouplist))
    if ct == -1:
        # Buffer was too small; ngrouplist now holds the required size, so
        # allocate a correctly-sized array and retry once
        grouplist = (ctypes.c_uint * int(ngrouplist.value))()
        ct = _libc_getgrouplist(user, group, ctypes.cast(ctypes.byref(grouplist), ctypes.POINTER(ctypes.c_uint)), ctypes.byref(ngrouplist))
    return tuple(grouplist[:ct])
def getpwnam(name, encoding='utf-8'):
    """
    Get the structure containing the fields from the specified entry in the
    password database. See
    `getpwnam(3) <http://man7.org/linux/man-pages/man3/getpwnam.3.html>`_ for
    more information.

    :param str name: The user name to look up.
    :param str encoding: The encoding to use for strings.
    :return: The entry from the user database or ``None`` if it was not found.
    :rtype: tuple
    """
    name = _cbytes(name, encoding=encoding)
    c_ppasswd = _libc_getpwnam(name)
    if not c_ppasswd:
        # NULL pointer - no such user
        return None
    # pass the caller's encoding through; previously the default encoding was
    # always used when decoding the result fields
    return c_ppasswd.contents.to_tuple(encoding=encoding)
def getpwuid(uid):
    """
    Get the structure containing the fields from the specified entry in the
    password database. See
    `getpwuid(3) <http://man7.org/linux/man-pages/man3/getpwuid.3.html>`_ for
    more information.

    :param int uid: The user id to look up.
    :return: The entry from the user database or ``None`` if it was not found.
    :rtype: tuple
    """
    passwd_ptr = _libc_getpwuid(uid)
    if not passwd_ptr:
        # NULL pointer - no entry for this uid
        return None
    return passwd_ptr.contents.to_tuple()
| |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.common import constants
from neutron.common import utils
from neutron import context as neutron_context
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as plugin_constants
LOG = logging.getLogger(__name__)
class L3RpcCallbackMixin(object):
    """A mix-in that enable L3 agent rpc support in plugin implementations."""

    def sync_routers(self, context, **kwargs):
        """Sync routers according to filters to a specific agent.
        @param context: contain user information
        @param kwargs: host, router_ids
        @return: a list of routers
                 with their interfaces and floating_ips
        """
        router_ids = kwargs.get('router_ids')
        host = kwargs.get('host')
        # All queries below run with admin rights regardless of the caller
        context = neutron_context.get_admin_context()
        l3plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        if not l3plugin:
            routers = {}
            LOG.error(_('No plugin for L3 routing registered! Will reply '
                        'to l3 agent with empty router dictionary.'))
        elif utils.is_extension_supported(
                l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            # Scheduler-aware path: optionally auto-schedule, then only
            # return the routers bound to the requesting agent's host
            if cfg.CONF.router_auto_schedule:
                l3plugin.auto_schedule_routers(context, host, router_ids)
            routers = l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids)
        else:
            routers = l3plugin.get_sync_data(context, router_ids)
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
                plugin, constants.PORT_BINDING_EXT_ALIAS):
            self._ensure_host_set_on_ports(context, plugin, host, routers)
        LOG.debug(_("Routers returned to l3 agent:\n %s"),
                  jsonutils.dumps(routers, indent=5))
        return routers

    def _ensure_host_set_on_ports(self, context, plugin, host, routers):
        """Ensure every port of the given routers has a host binding set."""
        for router in routers:
            LOG.debug(_("Checking router: %(id)s for host: %(host)s"),
                      {'id': router['id'], 'host': host})
            if router.get('gw_port') and router.get('distributed'):
                # DVR router: the gateway and SNAT ports are bound to the
                # SNAT host, not to the host that requested the sync
                self._ensure_host_set_on_port(context, plugin,
                                              router.get('gw_port_host'),
                                              router.get('gw_port'),
                                              router['id'])
                for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
                    self._ensure_host_set_on_port(context, plugin,
                                                  router.get('gw_port_host'),
                                                  p, router['id'])
            else:
                self._ensure_host_set_on_port(context, plugin, host,
                                              router.get('gw_port'),
                                              router['id'])
            for interface in router.get(constants.INTERFACE_KEY, []):
                self._ensure_host_set_on_port(context, plugin, host,
                                              interface, router['id'])

    def _ensure_host_set_on_port(self, context, plugin, host, port,
                                 router_id=None):
        """Bind *port* to *host* when unbound or when its binding failed."""
        if (port and
            (port.get('device_owner') !=
             constants.DEVICE_OWNER_DVR_INTERFACE and
             port.get(portbindings.HOST_ID) != host or
             port.get(portbindings.VIF_TYPE) ==
             portbindings.VIF_TYPE_BINDING_FAILED)):
            # All ports, including ports created for SNAT'ing for
            # DVR are handled here
            plugin.update_port(context, port['id'],
                               {'port': {portbindings.HOST_ID: host}})
        elif (port and
              port.get('device_owner') ==
              constants.DEVICE_OWNER_DVR_INTERFACE):
            # Ports that are DVR interfaces have multiple bindings (based on
            # of hosts on which DVR router interfaces are spawned). Such
            # bindings are created/updated here by invoking
            # update_dvr_port_binding
            plugin.update_dvr_port_binding(context, port['id'],
                                           {'port':
                                            {portbindings.HOST_ID: host,
                                             'device_id': router_id}
                                            })

    def get_external_network_id(self, context, **kwargs):
        """Get one external network id for l3 agent.
        l3 agent expects only on external network when it performs
        this query.
        """
        context = neutron_context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        net_id = plugin.get_external_network_id(context)
        LOG.debug(_("External network ID returned to l3 agent: %s"),
                  net_id)
        return net_id

    def get_service_plugin_list(self, context, **kwargs):
        """Return the identifiers of the loaded service plugins."""
        plugins = manager.NeutronManager.get_service_plugins()
        return plugins.keys()

    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Update operational status for a floating IP."""
        l3_plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        with context.session.begin(subtransactions=True):
            for (floatingip_id, status) in fip_statuses.iteritems():
                LOG.debug(_("New status for floating IP %(floatingip_id)s: "
                            "%(status)s"), {'floatingip_id': floatingip_id,
                                            'status': status})
                try:
                    l3_plugin.update_floatingip_status(context,
                                                       floatingip_id,
                                                       status)
                except l3.FloatingIPNotFound:
                    LOG.debug(_("Floating IP: %s no longer present."),
                              floatingip_id)
            # Find all floating IPs known to have been the given router
            # for which an update was not received. Set them DOWN mercilessly
            # This situation might occur for some asynchronous backends if
            # notifications were missed
            known_router_fips = l3_plugin.get_floatingips(
                context, {'last_known_router_id': [router_id]})
            # Consider only floating ips which were disassociated in the API
            # FIXME(salv-orlando): Filtering in code should be avoided.
            # the plugin should offer a way to specify a null filter
            fips_to_disable = (fip['id'] for fip in known_router_fips
                               if not fip['router_id'])
            for fip_id in fips_to_disable:
                l3_plugin.update_floatingip_status(
                    context, fip_id, constants.FLOATINGIP_STATUS_DOWN)

    def get_ports_by_subnet(self, context, **kwargs):
        """DVR: RPC called by dvr-agent to get all ports for subnet."""
        subnet_id = kwargs.get('subnet_id')
        LOG.debug("DVR: subnet_id: %s", subnet_id)
        filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
        plugin = manager.NeutronManager.get_plugin()
        return plugin.get_ports(context, filters=filters)

    def get_agent_gateway_port(self, context, **kwargs):
        """Get Agent Gateway port for FIP.
        l3 agent expects an Agent Gateway Port to be returned
        for this query.
        """
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        admin_ctx = neutron_context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        agent_port = l3plugin.create_fip_agent_gw_port_if_not_exists(
            admin_ctx, network_id, host)
        # Make sure the (possibly new) gateway port is bound to the host
        self._ensure_host_set_on_port(admin_ctx, plugin, host,
                                      agent_port)
        LOG.debug('Agent Gateway port returned : %(agent_port)s with '
                  'host %(host)s', {'agent_port': agent_port,
                                    'host': host})
        return agent_port

    def get_snat_router_interface_ports(self, context, **kwargs):
        """Get SNAT serviced Router Port List.
        The Service Node that hosts the SNAT service requires
        the ports to service the router interfaces.
        This function will check if any available ports, if not
        it will create ports on the routers interfaces and
        will send a list to the L3 agent.
        """
        router_id = kwargs.get('router_id')
        host = kwargs.get('host')
        admin_ctx = neutron_context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        snat_port_list = l3plugin.create_snat_intf_port_list_if_not_exists(
            admin_ctx, router_id)
        for p in snat_port_list:
            # Each SNAT port must be bound to the requesting SNAT host
            self._ensure_host_set_on_port(admin_ctx, plugin, host, p)
        LOG.debug('SNAT interface ports returned : %(snat_port_list)s '
                  'and on host %(host)s', {'snat_port_list': snat_port_list,
                                           'host': host})
        return snat_port_list
| |
# -*- coding: utf-8 -*-
import datetime
import sys
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        """Create an official Schedule for every meeting and populate it
        with ScheduledSessions built from the meeting's existing timeslots.
        """
        #from ietf.person.models import Person
        Person = orm['person.Person']
        # can not use custom manager
        wanda = Person.objects.get(user__username = 'wnl') # Wanda Lo
        Meeting = orm['meeting.meeting']
        ScheduledSession = orm['meeting.ScheduledSession']
        Schedule = orm['meeting.Schedule']
        for mtg in Meeting.objects.all():
            sys.stdout.write ("Processing meeting %s.." % (mtg.number))
            try:
                if mtg.agenda is not None:
                    # assume that we have done this meeting already.
                    sys.stdout.write("already done\n")
                    continue
            except Meeting.DoesNotExist:
                pass
            # NOTE(review): name is truncated to 15 characters - presumably
            # the field's max_length; confirm against the Schedule model
            na = Schedule(name=("official_%s"%(mtg.number))[0:15],
                          owner=wanda,
                          meeting = mtg,
                          visible=True, public=True)
            na.save()
            mtg.agenda = na
            mtg.save()
            sys.stdout.write("\n creating schedule %s\n" %(na.name))
            # Mirror every existing timeslot into the new schedule
            for slot in mtg.timeslot_set.all():
                session = slot.session
                # skip slots with no sessions.
                if session is None:
                    wg = "none"
                else:
                    wg = session.group.acronym
                sys.stdout.write (" session for wg:%s \r" % (wg))
                ss = ScheduledSession(timeslot = slot,
                                      session = slot.session,
                                      schedule = na,
                                      notes = "Auto created")
                ss.save()
            sys.stdout.write("\n")
        #
        #
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.docalias': {
'Meta': {'object_name': 'DocAlias'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'doc.document': {
'Meta': {'object_name': 'Document'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocumentAuthor']", 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'reversely_related_document_set'", 'blank': 'True', 'through': "orm['doc.RelatedDocument']", 'to': "orm['doc.DocAlias']"}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.documentauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocumentAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'doc.relateddocument': {
'Meta': {'object_name': 'RelatedDocument'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocAlias']"})
},
'doc.state': {
'Meta': {'ordering': "['type', 'order']", 'object_name': 'State'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'previous_states'", 'symmetrical': 'False', 'to': "orm['doc.State']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.statetype': {
'Meta': {'object_name': 'StateType'},
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
},
'group.group': {
'Meta': {'object_name': 'Group'},
'acronym': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False', 'blank': 'True'})
},
'meeting.constraint': {
'Meta': {'object_name': 'Constraint'},
'day': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Meeting']"}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.ConstraintName']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constraint_source_set'", 'to': "orm['group.Group']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constraint_target_set'", 'null': 'True', 'to': "orm['group.Group']"})
},
'meeting.meeting': {
'Meta': {'object_name': 'Meeting'},
'break_area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Schedule']", 'blank': 'True', 'null': 'True'}),
'reg_area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.MeetingTypeName']"}),
'venue_addr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'meeting.schedule': {
'Meta': {'object_name': 'Schedule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Meeting']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'meeting.room': {
'Meta': {'object_name': 'Room'},
'capacity': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Meeting']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'meeting.scheduledsession': {
'Meta': {'object_name': 'ScheduledSession'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Schedule']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['meeting.Session']", 'null': 'True'}),
'timeslot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.TimeSlot']"})
},
'meeting.session': {
'Meta': {'object_name': 'Session'},
'agenda_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'attendees': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'materials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.Document']", 'symmetrical': 'False', 'blank': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Meeting']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'requested': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'requested_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'requested_duration': ('ietf.meeting.timedeltafield.TimedeltaField', [], {'default': '0'}),
'scheduled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.SessionStatusName']"})
},
'meeting.timeslot': {
'Meta': {'object_name': 'TimeSlot'},
'duration': ('ietf.meeting.timedeltafield.TimedeltaField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Room']", 'null': 'True', 'blank': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Meeting']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meeting.Session']", 'null': 'True', 'blank': 'True'}),
'sessions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'slots'", 'to': "orm['meeting.Session']", 'through': "orm['meeting.ScheduledSession']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'show_location': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.TimeSlotTypeName']"})
},
'name.constraintname': {
'Meta': {'ordering': "['order']", 'object_name': 'ConstraintName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docrelationshipname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocRelationshipName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctagname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTagName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupstatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.grouptypename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.intendedstdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'IntendedStdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.meetingtypename': {
'Meta': {'ordering': "['order']", 'object_name': 'MeetingTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.sessionstatusname': {
'Meta': {'ordering': "['order']", 'object_name': 'SessionStatusName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.stdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'StdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.streamname': {
'Meta': {'ordering': "['order']", 'object_name': 'StreamName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.timeslottypename': {
'Meta': {'ordering': "['order']", 'object_name': 'TimeSlotTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'person.email': {
'Meta': {'object_name': 'Email'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['meeting']
symmetrical = True
| |
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import mock
import requests
from oslo_serialization import jsonutils
from nailgun.test import base
from nailgun.orchestrator import tasks_templates
from nailgun.settings import settings
class TestMakeTask(base.BaseTestCase):
    """Unit tests for the deployment-task builders in
    ``nailgun.orchestrator.tasks_templates``.

    Each test calls one ``make_*`` factory with literal inputs and asserts
    the exact task dict it produces.
    """

    def test_make_ubuntu_sources_task(self):
        """A deb repo becomes an upload_file task writing a sources.list entry."""
        result = tasks_templates.make_ubuntu_sources_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'deb',
                'uri': 'http://url',
                'suite': '/',
                'section': '',
                'priority': 1001
            })
        self.assertEqual(
            result,
            {'parameters': {
                'data': 'deb http://url / ',
                'path': '/etc/apt/sources.list.d/plugin_name.list'},
                'type': 'upload_file',
                'uids': [1, 2, 3]})

    def test_make_ubuntu_apt_disable_ipv6(self):
        """IPv6 is disabled for apt via a one-line apt.conf.d snippet."""
        result = tasks_templates.make_ubuntu_apt_disable_ipv6([1, 2, 3])
        self.assertEqual(
            result,
            {'parameters': {
                'data': 'Acquire::ForceIPv4 "true";\n',
                'path': '/etc/apt/apt.conf.d/05disable-ipv6'},
                'type': 'upload_file',
                'uids': [1, 2, 3]})

    def test_make_ubuntu_unauth_repos_task(self):
        """Unauthenticated repos are allowed via an apt.conf.d snippet."""
        result = tasks_templates.make_ubuntu_unauth_repos_task([1, 2, 3])
        self.assertEqual(
            result,
            {'parameters': {
                'data': 'APT::Get::AllowUnauthenticated 1;\n',
                'path': '/etc/apt/apt.conf.d/02mirantis-allow-unsigned'},
                'type': 'upload_file',
                'uids': [1, 2, 3]})

    def test_make_centos_repo_task_w_priority(self):
        """An rpm repo with a priority emits 'priority=N' in the .repo file."""
        result = tasks_templates.make_centos_repo_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'rpm',
                'uri': 'http://url',
                'priority': 1
            })
        self.assertEqual(
            result,
            {'parameters': {
                'data': ('[plugin_name]\nname=Plugin plugin_name repository\n'
                         'baseurl=http://url\ngpgcheck=0\npriority=1'),
                'path': '/etc/yum.repos.d/plugin_name.repo'},
                'type': 'upload_file',
                'uids': [1, 2, 3]})

    def test_make_centos_repo_task_wo_priority(self):
        """An rpm repo without a priority omits the 'priority=' line."""
        result = tasks_templates.make_centos_repo_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'rpm',
                'uri': 'http://url',
            })
        self.assertEqual(
            result,
            {'parameters': {
                'data': ('[plugin_name]\nname=Plugin plugin_name repository\n'
                         'baseurl=http://url\ngpgcheck=0'),
                'path': '/etc/yum.repos.d/plugin_name.repo'},
                'type': 'upload_file',
                'uids': [1, 2, 3]})

    def test_make_reboot_task(self):
        """Reboot task passes the requested timeout through unchanged."""
        result = tasks_templates.make_reboot_task(
            [1, 2, 3],
            {'parameters': {'timeout': 10}})
        self.assertEqual(
            result,
            {'type': 'reboot',
             'uids': [1, 2, 3],
             'parameters': {
                 'timeout': 10}})

    def test_make_provisioning_images_task(self):
        """The image-build shell task embeds the fuel-agent JSON config."""
        result = tasks_templates.make_provisioning_images_task(
            [1, 2, 3],
            repos=[
                {'name': 'repo', 'uri': 'http://some'}
            ],
            provision_data={
                'codename': 'trusty',
                'image_data': {
                    '/mount': {
                        'format': 'ext4',
                        'uri': 'http://uri'
                    }
                }},
            cid=123)
        fuel_image_conf = {
            "image_data": {
                "/mount": {
                    "uri": "http://uri",
                    "format": "ext4"
                }
            },
            "output": "/var/www/nailgun/targetimages",
            "repos": [
                {
                    "name": "repo",
                    "uri": "http://some"
                }
            ],
            "codename": "trusty"
        }
        self.assertEqual(result["type"], "shell")
        self.assertEqual(result["uids"], [1, 2, 3])
        params = result["parameters"].copy()
        del params["cmd"]
        self.assertEqual(
            params,
            {
                'timeout': settings.PROVISIONING_IMAGES_BUILD_TIMEOUT,
                'retries': 1,
                'interval': 1,
                'cwd': '/',
            }
        )
        # BUGFIX: str.lstrip()/rstrip() strip a *character set*, not a
        # prefix/suffix string.  The previous .lstrip("fa_build_image ...")
        # call only worked by accident because the JSON payload starts with
        # '{', which is not in the stripped set.  Peel off the exact command
        # prefix instead, and assert it is really there.
        prefix = ("fa_build_image --image_build_dir /var/lib/fuel/ibp "
                  "--log-file /var/log/fuel-agent-env-123.log "
                  "--data_driver nailgun_build_image --input_data '")
        cmd = result["parameters"]["cmd"]
        self.assertTrue(cmd.startswith(prefix))
        self.assertEqual(jsonutils.loads(cmd[len(prefix):].rstrip("'")),
                         fuel_image_conf)

    def test_make_download_debian_installer_task(self):
        """Kernel/initrd download task is built for URIs with/without a
        trailing slash."""
        remote_kernel = ('http://some/a/dists/trusty/main/'
                        'installer-amd64/current/images/'
                        'netboot/ubuntu-installer/amd64/linux')
        remote_initrd = ('http://some/a/dists/trusty/main/'
                        'installer-amd64/current/images/'
                        'netboot/ubuntu-installer/amd64/initrd.gz')
        relative_kernel = ('dists/trusty/main/installer-amd64/current/'
                          'images/netboot/ubuntu-installer/amd64/linux')
        relative_initrd = ('dists/trusty/main/installer-amd64/current/'
                          'images/netboot/ubuntu-installer/amd64/initrd.gz')
        local_kernel = '/var/www/nailgun/ubuntu/x86_64/images/linux'
        local_initrd = '/var/www/nailgun/ubuntu/x86_64/images/initrd.gz'
        # we have to be able to handle both cases with trailing slash
        # and without it
        for uri in ('http://some/a/', 'http://some/a'):
            result = tasks_templates.make_download_debian_installer_task(
                [1, 2, 3],
                repos=[{'name': 'repo', 'uri': uri}],
                installer_kernel={'remote_relative': relative_kernel,
                                  'local': local_kernel},
                installer_initrd={'remote_relative': relative_initrd,
                                  'local': local_initrd})
            self.assertEqual(result, {
                'type': 'shell',
                'uids': [1, 2, 3],
                'parameters': {
                    'cmd': ('LOCAL_KERNEL_FILE={local_kernel} '
                            'LOCAL_INITRD_FILE={local_initrd} '
                            'download-debian-installer '
                            '{remote_kernel} {remote_initrd}').format(
                        local_kernel=local_kernel,
                        local_initrd=local_initrd,
                        remote_kernel=remote_kernel,
                        remote_initrd=remote_initrd),
                    'timeout': 600,
                    'retries': 1,
                    'interval': 1,
                    'cwd': '/',
                }})
class TestMakeUbuntuPreferencesTask(base.BaseTestCase):
    """Tests for ``make_ubuntu_preferences_task`` (apt pinning files).

    The remote Debian Release file is mocked with a canned payload so the
    pin conditions (origin, label, archive, codename) are deterministic.
    """

    # Canned Release file served by the mocked requests.get below.
    _fake_debian_release = '''
      Origin: TestOrigin
      Label: TestLabel
      Archive: test-archive
      Codename: testcodename
    '''

    # BUGFIX: regex literals are now raw strings; '\*' in a plain string is
    # an invalid escape sequence (SyntaxWarning on modern Python).  The
    # matched text is unchanged (re treats r'\n' as a newline).
    _re_pin = re.compile(r'Pin: release (.*)')

    def _check_apt_preferences(self, data, sections, priority):
        """Assert *data* is a single apt pin with the expected conditions.

        NOTE(review): ``sections`` is accepted but currently unused — the
        expected conditions are hard-coded from the fake Release file;
        confirm whether per-section pins should be checked.
        """
        pins = data.split('\n\n')
        self.assertEqual(len(pins), 1)
        conditions = self._re_pin.search(pins[0]).group(1).split(',')
        # check general template
        self.assertRegexpMatches(
            data, (
                r'Package: \*\n'
                r'Pin: release .*\n'
                r'Pin-Priority: {0}'.format(priority)
            ))
        # check pin
        expected_conditions = [
            'a=test-archive',
            'l=TestLabel',
            'n=testcodename',
            'o=TestOrigin',
        ]
        self.assertItemsEqual(conditions, expected_conditions)

    @mock.patch('nailgun.utils.debian.requests.get',
                return_value=mock.Mock(text=_fake_debian_release))
    def test_make_ubuntu_preferences_task(self, _):
        """A suite-style repo yields one pref file with the pinned release."""
        result = tasks_templates.make_ubuntu_preferences_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'deb',
                'uri': 'http://url',
                'suite': 'test-archive',
                'section': 'main universe',
                'priority': 1004
            })
        data = result['parameters'].pop('data')
        self.assertEqual(
            result,
            {'parameters': {'path': '/etc/apt/preferences.d/plugin_name.pref'},
             'type': 'upload_file',
             'uids': [1, 2, 3]})
        self._check_apt_preferences(data, ['main', 'universe'], 1004)

    @mock.patch('nailgun.utils.debian.requests.get',
                return_value=mock.Mock(text=_fake_debian_release))
    def test_make_ubuntu_preferences_task_flat(self, _):
        """A flat repo (suite '/', empty section) is pinned the same way."""
        result = tasks_templates.make_ubuntu_preferences_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'deb',
                'uri': 'http://url',
                'suite': '/',
                'section': '',
                'priority': 1004
            })
        data = result['parameters'].pop('data')
        self.assertEqual(
            result,
            {'parameters': {'path': '/etc/apt/preferences.d/plugin_name.pref'},
             'type': 'upload_file',
             'uids': [1, 2, 3]})
        self._check_apt_preferences(data, [], 1004)

    @mock.patch('nailgun.utils.debian.requests.get')
    def test_make_ubuntu_preferences_task_returns_none_if_errors(self, m_get):
        """If the Release file cannot be fetched, no task is produced."""
        r = requests.Response()
        r.status_code = 404
        m_get.return_value = r
        result = tasks_templates.make_ubuntu_preferences_task(
            [1, 2, 3],
            {
                'name': 'plugin_name',
                'type': 'deb',
                'uri': 'http://url',
                'suite': 'test-archive',
                'section': 'main universe',
                'priority': 1004
            })
        self.assertIsNone(result)
| |
'''
create pwmCss, pwmAuth
define fitness
initPopulation as randomized
scoring
'''
import random
import threading
import time
from libgenetic.libgenetic import EvolutionBasic, Selections, Crossovers, Mutations, Generation, GABase
from libgenetic.pwm import PWM
import numpy as np
BASES_MAP = {0:'A', 1:'C', 2:'G', 3:'T'}
BASE_ODDS_MAP = {'A':0.28, 'C': 0.22, 'G': 0.22, 'T': 0.28}
class EI5pSpliceSitesGAModel:
    '''
    Genetic-algorithm model for 5-prime splice-site 9-mers.

    Wraps a dataset of 9-mers, scores candidate solutions against a PWM
    (position weight matrix) built from that dataset, and supplies the
    crossover/mutation operators used by the GA.  All operators keep the
    invariant GT dinucleotide at positions 3-4 intact.
    '''

    @staticmethod
    def load_data_tsv(filename):
        '''Read tab-separated 9-mers, one per line; returns a list of
        base-lists.  Blank lines are skipped.'''
        ret = []
        with open(filename) as f:
            content = f.read()
        lines = content.split('\n')
        for line in lines:
            if line:
                bases = line.split('\t')
                ret.append(bases)
        return ret

    @staticmethod
    def mergeSSData(data1, data2):
        '''Concatenate two 9-mer datasets (lists of base-lists) into one.'''
        npdata1 = np.array(data1)
        npdata2 = np.array(data2)
        retnp = np.concatenate((npdata1, npdata2))
        ret = retnp.tolist()
        return ret

    def __init__(self, nmerArr):
        '''Build the model and its PWM from *nmerArr* (list of base-lists).'''
        self._nmerArr = nmerArr
        self._pwm = self._computePwm(nmerArr)

    @property
    def rawdata(self):
        '''The raw 9-mer training data this model was built from.'''
        return self._nmerArr

    def _computePwm(self, nmerArr):
        '''Construct the PWM over the DNA alphabet with background odds
        from BASE_ODDS_MAP.'''
        symbolSet = set(['A', 'C', 'G', 'T'])
        symbolOddsMap = BASE_ODDS_MAP
        pwm = PWM(nmerArr, symbolSet, symbolOddsMap)
        return pwm

    def isValid9merSpliceSite(self, ninemer):
        '''True iff *ninemer* carries the canonical GT dinucleotide at
        positions 3-4.  Raises Exception on wrong length.'''
        if len(ninemer) != 9:
            raise Exception("dimension mismatch")
        return ninemer[3] == 'G' and ninemer[4] == 'T'

    def fitness(self, ninemer):
        '''PWM log-odds score of *ninemer*, heavily penalized if the GT
        invariant is broken.  Raises Exception on wrong length.'''
        if len(ninemer) != 9:
            raise Exception("dimension mismatch")
        baseScore = self._pwm.score(ninemer)
        penalty = 0
        if not self.isValid9merSpliceSite(ninemer):
            # print("Invalid ninemer: %s" % ninemer)
            penalty = 9 * 1000  #TODO: max logodds score; log2(1 / min(symOdds))
        ret = baseScore - penalty
        return ret

    def baseFlip(self, baseValue):
        '''
        Return any random base other than given base
        '''
        randomBase = random.randint(0, 3)
        flipValue = BASES_MAP[randomBase]  # BASES_MAP = {0:'A', 1:'C', 2:'G', 3:'T'}
        while flipValue == baseValue:
            randomBase = random.randint(0, 3)
            flipValue = BASES_MAP[randomBase]
        return flipValue

    @staticmethod
    def crossover_1p(sol1, sol2):
        '''One-point crossover at a random site outside positions 3-4.'''
        # BUGFIX: removed a dead "site = random.randint(...)" assignment that
        # was immediately overwritten by pick_random_site().
        negatives = [3, 4]  # prevent crossover through GT at sites 3, 4, first and last bases
        site = Crossovers.pick_random_site(rangeLen = 9, negativeSites = negatives)
        ret = Crossovers.one_point(sol1, sol2, site = site)
        return ret

    @staticmethod
    def crossover_2p(sol1, sol2):
        '''Two-point crossover with both sites outside positions 3-4
        (and distinct from each other).'''
        # BUGFIX: removed a dead "site = random.randint(...)" assignment that
        # was immediately overwritten by pick_random_site().
        negatives = [3, 4]  # prevent crossover through GT at sites 3, 4, first and last bases
        site1 = Crossovers.pick_random_site(rangeLen = 9, negativeSites = negatives)
        negatives = negatives + [site1]
        site2 = Crossovers.pick_random_site(rangeLen = 9, negativeSites = negatives)
        ret = Crossovers.two_point(sol1, sol2, site1 = site1, site2 = site2)
        return ret

    @staticmethod
    def crossover_uniform(sol1, sol2, swap_prob = 0.5):
        '''Uniform crossover; positions 3-4 are never swapped.'''
        negatives = [3, 4]  # prevent crossover through GT at sites 3, 4, first and last bases
        ret = Crossovers.uniform(sol1, sol2, swap_prob = swap_prob, negativeSites = negatives)
        return ret

    @staticmethod
    def crossover_uniform_orderbased(sol1, sol2):
        '''Order-based uniform crossover; positions 3-4 are never touched.'''
        negatives = [3, 4]  # prevent crossover through GT at sites 3, 4, first and last bases
        ret = Crossovers.uniform_orderbased(sol1, sol2, negativeSites = negatives)
        return ret

    def mutate(self, solution):
        '''Flip one random base (never positions 3-4) via baseFlip.'''
        return Mutations.provided_flip(solution = solution, flipProvider = self.baseFlip, negativeSites = [3,4])

    def __str__(self):
        # BUGFIX: this used to be a bare "pass", so __str__ returned None and
        # str(model) raised TypeError.  Return a short summary instead.
        return "EI5pSpliceSitesGAModel(n=%d)" % len(self._nmerArr)
class GASpliceSitesThread(threading.Thread):
    '''Runs one GA evolution (model + seed population) on a worker thread.

    The finished GABase (with its recorded generations) is exposed through
    the ``gaBase`` property once run() completes.
    '''

    def __init__(self, gaModel, initPopulation, genCount, crossover_provider=None, recombine_provider=None,
                 crossoverProbability=0.1, mutationProbability=0.1):
        threading.Thread.__init__(self)
        self._gaModel = gaModel
        self._initPopulation = initPopulation
        self._genCount = genCount
        self._crossoverProbability = crossoverProbability
        self._mutationProbability = mutationProbability
        # Providers are factories taking the model; fall back to defaults
        # (one-point crossover, ranked selection) when not supplied.
        if crossover_provider:
            self._crossover = crossover_provider(self._gaModel)
        else:
            self._crossover = EI5pSpliceSitesGAModel.crossover_1p
        if recombine_provider:
            self._recombine = recombine_provider(self._gaModel)
        else:
            self._recombine = lambda population: Selections.ranked(population, self._gaModel.fitness)
        self._gaBase = None

    def run(self):
        '''Thread body: evolve genCount generations from the seed population.'''
        engine = EvolutionBasic(select=self._recombine,
                                crossover=self._crossover,
                                mutate=self._gaModel.mutate,
                                crossoverProbability=self._crossoverProbability,
                                mutationProbability=self._mutationProbability)
        ga = GABase(engine, Generation(self._initPopulation), self._gaModel.fitness)
        ga.execute(maxGens=self._genCount)
        self._gaBase = ga

    @property
    def gaBase(self):
        '''GABase holding the evolved generations; None until run() finishes.'''
        return self._gaBase
def random5primeSpliceSitesPopulation(M, N, cardinality=4):
    '''Build M random N-mer solutions with the GT dinucleotide fixed at
    positions 3 and 4 (the 5' splice-site consensus).

    Index order matches BASES_MAP: 0='A', 1='C', 2='G', 3='T'.
    '''
    alphabet = 'ACGT'
    population = []
    for _ in range(M):
        solution = []
        for position in range(N):
            if position == 3:
                base = alphabet[2]  # invariant 'G'
            elif position == 4:
                base = alphabet[3]  # invariant 'T'
            else:
                base = alphabet[random.randint(0, cardinality - 1)]
            solution.append(base)
        population.append(solution)
    return population
class MatchUtils:
    '''Match statistics between evolved populations and training 9-mer data.'''

    @staticmethod
    def check_match(population, ninemerData):
        '''
        Args:
            population: candidate 9mers
            ninemerData: training 9mers
        Returns:
            percent match of population in ninemerData
        '''
        # Set membership makes each lookup O(1) instead of scanning the list.
        trainingSet = set("".join(nm) for nm in ninemerData)
        populationStrData = ["".join(nm) for nm in population]
        matched = sum(1 for solution in populationStrData if solution in trainingSet)
        return matched / float(len(populationStrData))

    @staticmethod
    def find_best_gens(gaBase):
        '''Return a list of the generations that reached the overall best fitness.'''
        genFitness = [gen._bestFitness for gen in gaBase._generations]
        bestFitness_all = max(genFitness)
        # BUG FIX: materialize to a list. On Python 3, ``filter()`` returns a
        # lazy iterator, which broke the ``bestGens[0]`` indexing done in
        # match_stat_2d/match_stat_1d and would be exhausted after one pass.
        return [g for g in gaBase._generations if g._bestFitness == bestFitness_all]

    @staticmethod
    def match_stat(gaBase, authssData, cssData):
        '''Best-generation and last-generation match scores against both the
        cryptic (css) and authentic (auth) splice-site data sets.'''
        lastGen = gaBase._generations[-1]
        bestGens = MatchUtils.find_best_gens(gaBase)
        bestgen_scoreCss = -float('inf')
        bestgen_scoreAuth = -float('inf')
        bestgen_genCss = -1
        bestgen_genAuth = -1
        for gen in bestGens:
            scoreCss = MatchUtils.check_match(gen._population, cssData)
            if scoreCss > bestgen_scoreCss:
                bestgen_scoreCss = scoreCss
                bestgen_genCss = gen._genIndex
            scoreAuth = MatchUtils.check_match(gen._population, authssData)
            if scoreAuth > bestgen_scoreAuth:
                bestgen_scoreAuth = scoreAuth
                bestgen_genAuth = gen._genIndex
        lastgen_scoreCss = MatchUtils.check_match(lastGen._population, cssData)
        lastgen_scoreAuth = MatchUtils.check_match(lastGen._population, authssData)
        return (bestgen_scoreCss, bestgen_scoreAuth, bestgen_genCss,
                bestgen_genAuth, lastgen_scoreCss, lastgen_scoreAuth)

    @staticmethod
    def match_stat_2d(gaBase, selfData, competeData):
        '''Best match against the model's own data plus the inverted match
        against the competing data set; also returns the best fitness.'''
        bestGens = MatchUtils.find_best_gens(gaBase)
        bestgen_scoreSelf = -float('inf')
        bestgen_scoreCompete = -float('inf')
        bestgen_genSelf = -1
        bestgen_genCompete = -1
        for gen in bestGens:
            scoreCompete = MatchUtils.check_match(gen._population, competeData)
            if scoreCompete > bestgen_scoreCompete:
                bestgen_scoreCompete = scoreCompete
                bestgen_genCompete = gen._genIndex
            scoreSelf = MatchUtils.check_match(gen._population, selfData)
            if scoreSelf > bestgen_scoreSelf:
                bestgen_scoreSelf = scoreSelf
                bestgen_genSelf = gen._genIndex
        bestgen_scoreCompete = 1 - bestgen_scoreCompete  # invert compete score
        retFitness = None
        if bestGens:
            retFitness = bestGens[0]._bestFitness
        return ((bestgen_scoreSelf, bestgen_scoreCompete),
                (bestgen_genSelf, bestgen_genCompete), retFitness)

    @staticmethod
    def match_stat_1d(gaBase, ssData):
        '''Best match score/generation against one data set, plus best fitness.'''
        bestGens = MatchUtils.find_best_gens(gaBase)
        bestgen_score = -float('inf')
        bestgen_gen = -1
        for gen in bestGens:
            score = MatchUtils.check_match(gen._population, ssData)
            if score > bestgen_score:
                bestgen_score = score
                bestgen_gen = gen._genIndex
        retFitness = None
        if bestGens:
            retFitness = bestGens[0]._bestFitness
        return (bestgen_score, bestgen_gen, retFitness)
def main(cssFile = 'data/dbass-prats/CrypticSpliceSite.tsv',
        authssFile = 'data/hs3d/Exon-Intron_5prime/EI_true_9.tsv',
        generationSize = 10, genCount = 10,
        crossoverProbability = 0.1, mutationProbability = 0.1):
    '''
    Compare AuthPWMGA <-> CSSPWMGA
    AuthPWMGA genN should predominantly carry stochastic properties of authentic SS data
    CSSPWMGA genN should predominantly carry stochastic properties of cryptic SS data
    '''
    # One PWM-fitness GA model per training set (cryptic vs authentic).
    cssGAData = EI5pSpliceSitesGAModel.load_data_tsv(cssFile)
    authssGAData = EI5pSpliceSitesGAModel.load_data_tsv(authssFile)
    cssGASpliceSites = EI5pSpliceSitesGAModel(cssGAData)
    authGASpliceSites = EI5pSpliceSitesGAModel(authssGAData)
    M = generationSize
    N = 9 # 9-mers
    # Both GAs start from the same random population so results are comparable.
    initPopulation = random5primeSpliceSitesPopulation(M, N)
    print(initPopulation)
    # Providers: ranked selection on the model's fitness; order-based uniform crossover.
    recombine_provider = lambda gaModel: lambda population: Selections.ranked(population, gaModel.fitness)
    crossover_provider = lambda gaModel: EI5pSpliceSitesGAModel.crossover_uniform_orderbased
    authThread = GASpliceSitesThread(authGASpliceSites, initPopulation, genCount = genCount,
            crossoverProbability = crossoverProbability, mutationProbability = mutationProbability,
            recombine_provider = recombine_provider,
            crossover_provider = crossover_provider)
    cssThread = GASpliceSitesThread(cssGASpliceSites, initPopulation, genCount = genCount,
            crossoverProbability = crossoverProbability, mutationProbability = mutationProbability,
            recombine_provider = recombine_provider,
            crossover_provider = crossover_provider)
    # NOTE(review): threads are started and joined back-to-back, so the two
    # evolutions actually run sequentially, not concurrently.
    cssThread.start()
    cssThread.join()
    cssGABase = cssThread.gaBase
    authThread.start()
    authThread.join()
    authGABase = authThread.gaBase
    # Cross-score each trained GA against both data sets and tabulate.
    stats = []
    stats.append(['TRAINER', 'bestgen_scoreCss', 'bestgen_scoreAuth', 'bestgen_genCss', 'bestgen_genAuth', 'lastgen_scoreCss', 'lastgen_scoreAuth'])
    (bestgen_scoreCss, bestgen_scoreAuth, bestgen_genCss, bestgen_genAuth, lastgen_scoreCss, lastgen_scoreAuth) = \
        MatchUtils.match_stat(cssGABase, authssGAData, cssGAData)
    print("\nCSS GAStats:")
    print("BESTGEN: cssGen_X_cssData: %s" % str(bestgen_scoreCss))
    print("BESTGEN: cssGen_X_authData: %s" % str(bestgen_scoreAuth))
    print("BESTGENIDX: cssGen_X_cssData: %s" % str(bestgen_genCss))
    print("BESTGENIDX: cssGen_X_authData: %s" % str(bestgen_genAuth))
    print("LASTGEN: cssGen_X_cssData: %s" % str(lastgen_scoreCss))
    print("LASTGEN: cssGen_X_authData: %s" % str(lastgen_scoreAuth))
    stats.append(['cssGABase', bestgen_scoreCss, bestgen_scoreAuth, bestgen_genCss, bestgen_genAuth, lastgen_scoreCss, lastgen_scoreAuth])
    (bestgen_scoreCss, bestgen_scoreAuth, bestgen_genCss, bestgen_genAuth, lastgen_scoreCss, lastgen_scoreAuth) = \
        MatchUtils.match_stat(authGABase, authssGAData, cssGAData)
    print("\nAUTH GAStats:")
    print("BESTGEN: authGen_X_cssData: %s" % str(bestgen_scoreCss))
    print("BESTGEN: authGen_X_authData: %s" % str(bestgen_scoreAuth))
    print("BESTGENIDX: authGen_X_cssData: %s" % str(bestgen_genCss))
    print("BESTGENIDX: authGen_X_authData: %s" % str(bestgen_genAuth))
    print("LASTGEN: authGen_X_cssData: %s" % str(lastgen_scoreCss))
    print("LASTGEN: authGen_X_authData: %s" % str(lastgen_scoreAuth))
    stats.append(['authGABase', bestgen_scoreCss, bestgen_scoreAuth, bestgen_genCss, bestgen_genAuth, lastgen_scoreCss, lastgen_scoreAuth])
    # Imported lazily so the GA can run without tabulate installed.
    from tabulate import tabulate
    print("\nRESULTS:")
    print(tabulate(stats, headers='firstrow'))
    return stats
if __name__ == '__main__':
    # Command-line entry point: evolve splice-site 9-mers with user-supplied
    # GA hyper-parameters and data file locations.
    import os
    import argparse
    parser = argparse.ArgumentParser(description='libgenetic implementation for Splice Site evolution using PWM')
    parser.add_argument('--gen_count', type=int, help='generation count', required=True)
    parser.add_argument('--gen_size', type=int, help='generation size', required=True)
    parser.add_argument('--xover_prob', type=float, help='crossover probability', default=0.7)
    parser.add_argument('--mut_prob', type=float, help='mutation probability', default=0.1)
    # Data paths default to the bundled data sets relative to the working directory.
    parser.add_argument('--css_file', help='path to css tsv data file', default='%s/data/dbass-prats/CrypticSpliceSite.tsv' % os.getcwd())
    parser.add_argument('--authss_file', help='path to authss tsv data file', default='%s/data/hs3d/Exon-Intron_5prime/EI_true_9.tsv' % os.getcwd())
    args = parser.parse_args()
    main(cssFile = args.css_file, authssFile = args.authss_file,
         generationSize = args.gen_size, genCount = args.gen_count,
         crossoverProbability = args.xover_prob, mutationProbability = args.mut_prob)
| |
#
# ppo.py, doom-net
#
# Created by Andrey Kolishchak on 01/21/17.
#
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from device import device
from collections import namedtuple
from ppo_base import PPOBase
import random
class Cells:
    """Container for recurrent state tensors (e.g. an LSTM's h/c pair).

    Holds ``cell_num`` tensors, each of shape (batch_size, cell_size).
    """

    def __init__(self, cell_num, cell_size, batch_size, data=None):
        self.cell_num = cell_num
        self.cell_size = cell_size
        self.batch_size = batch_size
        # Start from zeros on the compute device unless tensors are supplied.
        if data is not None:
            self.data = data
        else:
            self.data = [torch.zeros(batch_size, cell_size, device=device)
                         for _ in range(cell_num)]

    def clone(self):
        """Deep, autograd-detached copy of the state."""
        copies = [tensor.detach().clone() for tensor in self.data]
        return Cells(self.cell_num, self.cell_size, self.batch_size, copies)

    def reset(self):
        """Cut the autograd history while keeping the current values."""
        self.data = [tensor.detach() for tensor in self.data]

    def sub_range(self, r1, r2):
        """Rows [r1, r2) of every cell, wrapped as a new Cells instance."""
        sliced = [tensor[r1:r2] for tensor in self.data]
        return Cells(self.cell_num, self.cell_size, r2 - r1, sliced)
class ResBlock(nn.Module):
    """Single-convolution residual block with replicate padding.

    Forward pass: pad -> 3x3 conv -> ReLU, add a (possibly 1x1-projected)
    shortcut, then a final ReLU.
    """

    def __init__(self, inplanes, planes, stride=1, padding=(0, 0)):
        super(ResBlock, self).__init__()
        # F.pad expects (left, right, top, bottom); the conv itself runs unpadded.
        self.padding = (padding[1], padding[1], padding[0], padding[0])
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=(0, 0))
        # Project the shortcut only when the shape would not match.
        if stride == 1 and inplanes == planes:
            self.downsample = None
        else:
            self.downsample = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)

    def forward(self, x):
        out = F.relu(self.conv1(F.pad(x, self.padding, mode='replicate')))
        shortcut = x if self.downsample is None else self.downsample(x)
        return F.relu(out + shortcut)
class BaseModel(nn.Module):
    """CNN + LSTM actor-critic network over stacked screen frames.

    The convolutional stack encodes the screen; an LSTM cell mixes the
    encoding with game variables and the previous action; small MLP heads
    produce action logits and the state value.
    """

    def __init__(self, in_channels, button_num, variable_num, frame_num, batch_size):
        super(BaseModel, self).__init__()
        self.screen_feature_num = 256
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=5, stride=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=2)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2)
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2)
        #self.conv6 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(2, 3), stride=1)
        self.fc1 = nn.Linear(256 * 2 * 3, 256)
        self.screen_features1 = nn.LSTMCell(256 + variable_num + button_num, self.screen_feature_num)
        layer1_size = 128
        self.action1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.action2 = nn.Linear(layer1_size, button_num)
        self.value1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.value2 = nn.Linear(layer1_size, 1)
        # Per-environment frame history used for frame stacking (lazy init).
        self.screens = None
        self.frame_num = frame_num
        self.batch_size = batch_size
        self.button_num = button_num

    def forward(self, screen, variables, prev_action, cells, non_terminal, update_cells=True):
        """Encode the screen and advance the recurrent state.

        Handles both a single live batch (optionally updating ``cells`` in
        place) and a rollout of several batches concatenated along dim 0,
        which is pushed through the LSTM chunk by chunk.
        """
        # cnn
        screen_features = F.relu(self.conv1(screen), inplace=True)
        screen_features = F.relu(self.conv2(screen_features), inplace=True)
        screen_features = F.relu(self.conv3(screen_features), inplace=True)
        screen_features = F.relu(self.conv4(screen_features), inplace=True)
        screen_features = F.relu(self.conv5(screen_features), inplace=True)
        screen_features = screen_features.view(screen_features.size(0), -1)
        screen_features = F.relu(self.fc1(screen_features))
        screen_features = torch.cat([screen_features, variables, prev_action], 1)
        # rnn
        if screen_features.shape[0] <= self.batch_size:
            data = cells.data
            data = self.screen_features1(screen_features, data)
            if update_cells:
                cells.data = data
            return data[0]
        else:
            # Rollout replay: feed one batch-sized chunk at a time so the
            # LSTM state evolves across the sequence.
            features = []
            for i in range(screen_features.shape[0]//self.batch_size):
                start = i * self.batch_size
                end = start + self.batch_size
                cells.data = self.screen_features1(screen_features[start:end], cells.data)
                features.append(cells.data[0])
            features = torch.cat(features, dim=0)
            return features

    def get_action(self, features):
        """Action logits (one per button) from LSTM features."""
        action = F.relu(self.action1(features))
        action = self.action2(action)
        return action

    def get_value(self, features):
        """State-value estimate from LSTM features."""
        value = F.relu(self.value1(features))
        value = self.value2(value)
        return value

    def transform_input(self, screen, variables, prev_action):
        """Stack the last frame_num frames per environment, one-hot encode
        the previous action, and move all inputs to the compute device."""
        screen_batch = []
        if self.frame_num > 1:
            if self.screens is None:
                # BUG FIX: ``[[]] * n`` produced n references to ONE shared
                # list, mixing frame histories across batch entries; build a
                # distinct history list per environment instead.
                self.screens = [[] for _ in range(len(screen))]
            for idx, screens in enumerate(self.screens):
                if len(screens) >= self.frame_num:
                    screens.pop(0)
                screens.append(screen[idx])
                if len(screens) == 1:
                    # First frame for this env: replicate to fill the stack.
                    for i in range(self.frame_num - 1):
                        screens.append(screen[idx])
                screen_batch.append(torch.cat(screens, 0))
            screen = torch.stack(screen_batch)
        prev_action = torch.zeros(prev_action.shape[0], self.button_num).scatter(-1, prev_action.long(), 1)
        return screen.to(device), variables.to(device), prev_action.to(device)

    def set_non_terminal(self, non_terminal):
        """Drop frame history for environments whose episode just ended."""
        if self.screens is not None:
            indexes = torch.nonzero(non_terminal == 0).squeeze()
            for idx in range(len(indexes)):
                self.screens[indexes[idx]] = []
# Per-step rollout record saved while acting and consumed by PPO's backward pass.
StepInfo = namedtuple('StepInfo', ['screen', 'variables', 'prev_action', 'log_action', 'value', 'action'])
class PPOScreen(PPOBase):
    """PPO agent that acts from raw screen frames via BaseModel.

    Rollout data (screen, variables, previous action, log-prob, value,
    action) is accumulated in self.steps while acting; backward() then runs
    several epochs of the clipped PPO surrogate objective over the rollout.
    """

    def __init__(self, args):
        self.model = BaseModel(
            args.screen_size[0]*args.frame_num, args.button_num, args.variable_num, args.frame_num, args.batch_size
        ).to(device)
        if args.load is not None:
            # load weights
            state_dict = torch.load(args.load)
            self.model.load_state_dict(state_dict)
        self.discount = args.episode_discount
        # Rollout buffers, filled during acting and consumed by backward().
        self.steps = []
        self.rewards = []
        self.non_terminals = []
        self.non_terminal = torch.ones(args.batch_size, 1)
        # Recurrent (h, c) state; init_cells snapshots the state at the start
        # of the rollout so backward() can replay it from the same point.
        self.cells = Cells(2, self.model.screen_feature_num, args.batch_size)
        self.init_cells = self.cells.clone()
        self.optimizer = optim.Adam(self.model.parameters(), lr=args.learning_rate, weight_decay=0, amsgrad=True)
        if args.load is not None and os.path.isfile(args.load + '_optimizer.pth'):
            optimizer_dict = torch.load(args.load+'_optimizer.pth')
            self.optimizer.load_state_dict(optimizer_dict)
            print("optimizer loaded")
        self.optimizer.zero_grad()
        self.args = args

    def forward(self, screen, variables, prev_action, non_terminals, action_only=False, save_step_info=False, action=None, action_dist=False):
        """Run the policy (and optionally the value head).

        Returns (action, None, None) when action_only is set, otherwise
        (action, action_log_prob, value, entropy) -- note the differing
        tuple sizes, which the callers below rely on.
        """
        features = self.model.forward(screen, variables, prev_action, self.cells, non_terminals)
        action_prob = self.model.get_action(features)
        if action_only:
            if action_dist:
                # Sample from the softmax action distribution.
                action_prob = F.softmax(action_prob, dim=1)
                action = torch.multinomial(action_prob, 1)
            else:
                # Greedy: highest-logit action.
                _, action = action_prob.max(1, keepdim=True)
            return action, None, None
        action_prob = F.softmax(action_prob, dim=1)
        if action is None:
            action = torch.multinomial(action_prob, 1)
        # value prediction - critic
        value = self.model.get_value(features)
        # policy log
        action_log_prob = action_prob.gather(-1, action).log()
        #logits = action_prob.log()
        #action_log_prob = logits.gather(-1, action)
        entropy = None
        #entropy = -(logits * action_prob).sum(-1)
        if save_step_info:
            # save step info for backward pass
            self.steps.append(StepInfo(screen.cpu(), variables, prev_action, action_log_prob, value, action))
        return action, action_log_prob, value, entropy

    def get_action(self, state, prev_action, action_dist=False):
        """Choose an action without recording rollout data (no grad)."""
        with torch.set_grad_enabled(False):
            action, _, _ = self.forward(
                *self.model.transform_input(state.screen, state.variables, prev_action), self.non_terminal, action_only=True, action_dist=action_dist
            )
        return action

    def get_save_action(self, state, prev_action):
        """Choose an action and record the step for the later backward pass."""
        with torch.set_grad_enabled(False):
            action, _, _, _ = self.forward(
                *self.model.transform_input(state.screen, state.variables, prev_action), self.non_terminal, save_step_info=True
            )
        return action

    def set_last_state(self, state, prev_action):
        """Append a value-only bootstrap step for the state after the rollout."""
        with torch.set_grad_enabled(False):
            features = self.model.forward(
                *self.model.transform_input(state.screen, state.variables, prev_action),
                self.cells, self.non_terminal, update_cells=False
            )
            value = self.model.get_value(features)
            self.steps.append(StepInfo(None, None, None, None, value, None))

    def set_reward(self, reward):
        """Record the (scaled) reward for the current step."""
        self.rewards.append(reward * 0.01)  # no clone() b/c of * 0.01

    def set_non_terminal(self, non_terminal):
        """Record terminal flags (0 = episode ended) for the current step."""
        non_terminal = non_terminal.clone()
        self.model.set_non_terminal(non_terminal)
        self.non_terminals.append(non_terminal)
        self.non_terminal = non_terminal

    def reset(self):
        """Clear rollout buffers and detach the recurrent state."""
        self.steps = []
        self.rewards = []
        # termination status is for next state, so first episode state is always non-terminal
        self.non_terminals = []
        self.cells.reset()
        self.init_cells = self.cells.clone()

    def backward(self):
        """One PPO update over the recorded rollout.

        Returns the gradient norm and weight norm from the last epoch.
        """
        rewards = self.rewards
        episode_steps = self.steps
        non_terminals = self.non_terminals
        final_cells = self.cells.clone()
        #
        # calculate step returns in reverse order
        returns = torch.Tensor(len(rewards), *episode_steps[-1].value.shape)
        # last step contains only value, take it and delete the step
        step_return = episode_steps[-1].value.detach().cpu()
        del episode_steps[-1]
        for i in range(len(rewards) - 1, -1, -1):
            # R_i = r_i + discount * R_{i+1}, zeroed across episode boundaries.
            step_return.mul_(non_terminals[i]).mul_(self.discount).add_(rewards[i])
            returns[i] = step_return
        # do not normalize rewards
        # returns = (returns - returns.mean(axis=0)) / (returns.std(axis=0) + 1e-5)
        returns = returns.to(device)
        #
        # calculate advantage
        steps_num = len(episode_steps)
        advantage = torch.Tensor(*returns.shape)
        for i in range(steps_num):
            advantage[i] = returns[i] - episode_steps[i].value.detach()
        advantage = advantage.view(-1, 1).to(device)
        # normalize advantages
        advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)
        returns = returns.view(-1, 1)
        self.model.train()
        # Flatten the whole rollout into single batched tensors.
        screens = torch.cat([step.screen for step in episode_steps], dim=0).to(device)
        variables = torch.cat([step.variables for step in episode_steps], dim=0)
        prev_actions = torch.cat([step.prev_action for step in episode_steps], dim=0)
        non_terminals = torch.cat(non_terminals, dim=0)
        actions = torch.cat([step.action for step in episode_steps], dim=0)
        old_log_actions = torch.cat([step.log_action for step in episode_steps], dim=0)
        for batch in range(10):
            # Replay the rollout from the saved initial recurrent state.
            self.cells = self.init_cells.clone()
            _, log_actions, values, entropy = self.forward(screens, variables, prev_actions, non_terminals, action=actions)
            # Clipped PPO surrogate objective (epsilon = 0.1).
            ratio = (log_actions - old_log_actions).exp()
            advantage_batch = advantage
            policy_loss = - torch.min(
                ratio * advantage_batch,
                torch.clamp(ratio, 1 - 0.1, 1 + 0.1) * advantage_batch
            ).mean()
            value_loss = F.smooth_l1_loss(values, returns)
            loss = policy_loss + value_loss #+ entropy_loss #+ 0.0001*weights_l2
            # backpro
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), 1)
            grads = []
            weights = []
            for p in self.model.parameters():
                if p.grad is not None:
                    grads.append(p.grad.view(-1))
                    weights.append(p.view(-1))
            grads = torch.cat(grads, 0)
            weights = torch.cat(weights, 0)
            grads_norm = grads.norm()
            weights_norm = weights.norm()
            # check for NaN (NaN != NaN)
            assert grads_norm == grads_norm
            self.optimizer.step()
            self.optimizer.zero_grad()
        # reset state
        self.cells = final_cells
        self.reset()
        return grads_norm, weights_norm

    def train(self):
        """Switch the underlying model to training mode."""
        self.model.train()

    def eval(self):
        """Switch the underlying model to evaluation mode."""
        self.model.eval()

    def save(self):
        """Persist model and optimizer state to the checkpoint files."""
        torch.save(self.model.state_dict(), self.args.checkpoint_file)
        torch.save(self.optimizer.state_dict(), self.args.checkpoint_file + '_optimizer.pth')
| |
# -*- coding: utf-8 -*-
"""
Test aspects to allow fine grained control over what tests are executed.
Several parts of the test infrastructure are implemented as mixins,
such as API result caching and excessive test durations. An unused
mixin to show cache usage is included.
"""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import print_function, unicode_literals
__version__ = '$Id$'
"""
TODO:
skip if the user is blocked.
sysop flag, implement in site & page, and
possibly some of the script tests.
labs flag, for wikidataquery
slow flag
wikiquerydata - quite slow
weblib - also slow
(this class, and a FastTest, could error/pass based
it consumed more than a specified amount of time allowed.)
net flag should disable network libraries
UITestCase:
Not integrated; direct subclass of unittest.TestCase.
"""
import inspect
import itertools
import os
import re
import sys
import time
import warnings
from contextlib import contextmanager
import pywikibot
import pywikibot.config2 as config
from pywikibot import log, Site
from pywikibot.exceptions import ServerError, NoUsername
from pywikibot.site import BaseSite
from pywikibot.family import WikimediaFamily
from pywikibot.comms import http
from pywikibot.data.api import Request as _original_Request
import tests
from tests import unittest, patch_request, unpatch_request
from tests.utils import (
add_metaclass, execute_pwb, DrySite, DryRequest,
WarningSourceSkipContextManager,
)
class TestCaseBase(unittest.TestCase):

    """Base class for all tests."""

    # Python 2/3 compatibility shims: define the modern assert method names
    # on interpreters whose unittest only provides the deprecated spellings.
    if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
        def assertRaisesRegex(self, *args, **kwargs):
            """
            Wrapper of unittest.assertRaisesRegexp for Python 2 unittest.

            assertRaisesRegexp is deprecated in Python 3.
            """
            return self.assertRaisesRegexp(*args, **kwargs)

    if not hasattr(unittest.TestCase, 'assertRegex'):
        def assertRegex(self, *args, **kwargs):
            """
            Wrapper of unittest.assertRegexpMatches for Python 2 unittest.

            assertRegexpMatches is deprecated in Python 3.
            """
            return self.assertRegexpMatches(*args, **kwargs)

    if not hasattr(unittest.TestCase, 'assertCountEqual'):
        def assertCountEqual(self, *args, **kwargs):
            """
            Wrapper of unittest.assertItemsEqual for Python 2 unittest.

            assertItemsEqual is removed in Python 3.
            """
            return self.assertItemsEqual(*args, **kwargs)

    def _addUnexpectedSuccess(self, result):
        """Report and ignore."""
        # Downgrade unexpected successes to plain successes so they do not
        # fail the run; just flag them inline on stdout.
        print(' unexpected success ', end='')
        sys.stdout.flush()
        result.addSuccess(self)

    def _addExpectedFailure(self, result, exc_info=None):
        """Report and ignore."""
        print(' expected failure ', end='')
        sys.stdout.flush()
        result.addSuccess(self)

    def assertPageInNamespaces(self, page, namespaces):
        """
        Assert that Pages is in namespaces.

        @param page: Page
        @type page: Page
        @param namespaces: expected namespaces
        @type namespaces: int or set of int
        """
        if isinstance(namespaces, int):
            namespaces = set([namespaces])
        self.assertIn(page.namespace(), namespaces,
                      "%s not in namespace %r" % (page, namespaces))

    def _get_gen_pages(self, gen, count=None, site=None):
        """
        Get pages from gen, asserting they are Page from site.

        Iterates at most two greater than count, including the
        Page after count if it exists, and then a Page with title '...'
        if additional items are in the iterator.

        @param gen: Page generator
        @type gen: generator of Page
        @param count: number of pages to get
        @type count: int
        @param site: Site of expected pages
        @type site: APISite
        """
        original_iter = iter(gen)
        gen = itertools.islice(original_iter, 0, count)
        gen_pages = list(gen)
        try:
            gen_pages.append(next(original_iter))
            next(original_iter)
            if not site:
                site = gen_pages[0].site
            # Sentinel page signalling that the generator had more items.
            gen_pages.append(pywikibot.Page(site, '...'))
        except StopIteration:
            pass
        for page in gen_pages:
            self.assertIsInstance(page, pywikibot.Page)
            if site:
                self.assertEqual(page.site, site)
        return gen_pages

    def _get_gen_titles(self, gen, count, site=None):
        # Titles of pages produced by gen (see _get_gen_pages for semantics).
        gen_pages = self._get_gen_pages(gen, count, site)
        gen_titles = [page.title() for page in gen_pages]
        return gen_titles

    def _get_canonical_titles(self, titles, site=None):
        # Normalize expected titles to the site's canonical form so they
        # compare equal with what page.title() returns.
        if site:
            titles = [pywikibot.Link(title, site).canonical_title()
                      for title in titles]
        elif not isinstance(titles, list):
            titles = list(titles)
        return titles

    def assertPagesInNamespaces(self, gen, namespaces):
        """
        Assert that generator returns Pages all in namespaces.

        @param gen: generator to iterate
        @type gen: generator
        @param namespaces: expected namespaces
        @type namespaces: int or set of int
        """
        if isinstance(namespaces, int):
            namespaces = set([namespaces])
        for page in gen:
            self.assertPageInNamespaces(page, namespaces)

    def assertPagesInNamespacesAll(self, gen, namespaces, skip=False):
        """
        Try to confirm that generator returns Pages for all namespaces.

        @param gen: generator to iterate
        @type gen: generator
        @param namespaces: expected namespaces
        @type namespaces: int or set of int
        @param skip: skip test if not all namespaces found
        @type skip: bool
        """
        if isinstance(namespaces, int):
            namespaces = set([namespaces])
        else:
            assert isinstance(namespaces, set)
        page_namespaces = [page.namespace() for page in gen]
        if skip and set(page_namespaces) != namespaces:
            raise unittest.SkipTest('Pages in namespaces %r not found.'
                                    % list(namespaces - set(page_namespaces)))
        else:
            self.assertEqual(set(page_namespaces), namespaces)

    def assertPageTitlesEqual(self, gen, titles, site=None):
        """
        Test that pages in gen match expected titles.

        Only iterates to the length of titles plus two.

        @param gen: Page generator
        @type gen: generator of Page
        @param titles: Expected titles
        @type titles: iterator
        @param site: Site of expected pages
        @type site: APISite
        """
        titles = self._get_canonical_titles(titles, site)
        gen_titles = self._get_gen_titles(gen, len(titles), site)
        self.assertEqual(gen_titles, titles)

    def assertPageTitlesCountEqual(self, gen, titles, site=None):
        """
        Test that pages in gen match expected titles, regardless of order.

        Only iterates to the length of titles plus two.

        @param gen: Page generator
        @type gen: generator of Page
        @param titles: Expected titles
        @type titles: iterator
        @param site: Site of expected pages
        @type site: APISite
        """
        titles = self._get_canonical_titles(titles, site)
        gen_titles = self._get_gen_titles(gen, len(titles), site)
        self.assertCountEqual(gen_titles, titles)

    # Backwards-compatible alias kept for existing callers.
    assertPagelistTitles = assertPageTitlesEqual
class TestLoggingMixin(TestCaseBase):

    """Logging for test cases."""

    @classmethod
    def setUpClass(cls):
        """Set up test class."""
        cls._log_prefix = inspect.getfile(cls) + ':' + cls.__name__

    def _outcome_result(self):
        """Return the outcome object tracking errors/failures, or None.

        The attribute name differs across Python and nose versions; this
        cascade was previously duplicated in setUp and tearDown.
        """
        if hasattr(self, '_outcomeForDoCleanups'):
            # Python 3 unittest & nose
            return self._outcomeForDoCleanups
        if hasattr(self, '_outcome'):
            # Python 3.4 nose
            return self._outcome
        if hasattr(self, '_resultForDoCleanups'):
            # Python 2 unittest & nose
            return self._resultForDoCleanups
        return None

    def setUp(self):
        """Set up each unit test."""
        super(TestLoggingMixin, self).setUp()
        outcome = self._outcome_result()
        if outcome is None:
            return
        self._previous_errors = len(outcome.errors)
        # nose 3.4 outcomes do not track failures
        if hasattr(outcome, 'failures'):
            self._previous_failures = len(outcome.failures)
        log('START ' + self._log_prefix + '.' + self._testMethodName)

    def tearDown(self):
        """Tear down test."""
        super(TestLoggingMixin, self).tearDown()
        outcome = self._outcome_result()
        if outcome is None:
            return
        if len(outcome.errors) > self._previous_errors:
            status = ' NOT OK: ERROR'
        # nose 3.4 outcomes do not track failures
        elif (hasattr(outcome, 'failures') and
                len(outcome.failures) > self._previous_failures):
            status = ' NOT OK: FAILURE'
        else:
            status = ' OK'
        log('END ' + self._log_prefix + '.' + self._testMethodName + status)
class TestTimerMixin(TestCaseBase):

    """Time each test and report excessive durations."""

    # Number of seconds each test may consume
    # before a note is added after the test.
    test_duration_warning_interval = 10

    def setUp(self):
        """Set up test."""
        super(TestTimerMixin, self).setUp()
        # Wall-clock start time, compared against in tearDown.
        self.test_start = time.time()

    def tearDown(self):
        """Tear down test."""
        self.test_completed = time.time()
        duration = self.test_completed - self.test_start
        if duration > self.test_duration_warning_interval:
            # Flag slow tests inline in the runner output.
            print(' %0.3fs' % duration, end=' ')
            sys.stdout.flush()
        super(TestTimerMixin, self).tearDown()
class DisableSiteMixin(TestCaseBase):

    """Test cases not connected to a Site object.

    Do not use this for mock Site objects.

    Never set a class or instance variable called 'site'
    As it will prevent tests from executing when invoked as:
    $ nosetests -a '!site' -v
    """

    def setUp(self):
        """Set up test."""
        # Replace the Site factory so any accidental Site() call fails the
        # test immediately; the original is restored in tearDown.
        self.old_Site_lookup_method = pywikibot.Site
        pywikibot.Site = lambda *args: self.fail('%s: Site() not permitted'
                                                 % self.__class__.__name__)
        super(DisableSiteMixin, self).setUp()

    def tearDown(self):
        """Tear down test."""
        super(DisableSiteMixin, self).tearDown()
        pywikibot.Site = self.old_Site_lookup_method
class ForceCacheMixin(TestCaseBase):

    """Aggressively cached API test cases.

    Patches pywikibot.data.api to aggressively cache
    API responses.
    """

    def setUp(self):
        """Set up test."""
        # Patch before the base setUp so every request in the test is cached.
        patch_request()
        super(ForceCacheMixin, self).setUp()

    def tearDown(self):
        """Tear down test."""
        super(ForceCacheMixin, self).tearDown()
        unpatch_request()
class SiteNotPermitted(pywikibot.site.BaseSite):

    """Site interface to prevent sites being loaded."""

    def __init__(self, code, fam=None, user=None, sysop=None):
        """Constructor.

        Always raises: instantiating any site during a dry test is an error.
        """
        raise pywikibot.SiteDefinitionError(
            'Loading site %s:%s during dry test not permitted'
            % (fam, code))
class DisconnectedSiteMixin(TestCaseBase):

    """Test cases using a disconnected Site object.

    Do not use this for mock Site objects.

    Never set a class or instance variable called 'site'
    As it will prevent tests from executing when invoked as:
    $ nosetests -a '!site' -v
    """

    def setUp(self):
        """Set up test."""
        # Swap in dry/forbidden implementations so no network traffic can
        # occur; everything patched here is restored in tearDown.
        self.old_config_interface = config.site_interface
        # TODO: put a dummy subclass into config.site_interface
        # as the default, to show a useful error message.
        config.site_interface = SiteNotPermitted
        pywikibot.data.api.Request = DryRequest
        self.old_convert = pywikibot.Claim.TARGET_CONVERTER['commonsMedia']
        # commonsMedia targets normally need a live commons Site; use DrySite.
        pywikibot.Claim.TARGET_CONVERTER['commonsMedia'] = (
            lambda value, site: pywikibot.FilePage(
                pywikibot.Site('commons', 'commons', interface=DrySite),
                value))
        super(DisconnectedSiteMixin, self).setUp()

    def tearDown(self):
        """Tear down test."""
        super(DisconnectedSiteMixin, self).tearDown()
        config.site_interface = self.old_config_interface
        pywikibot.data.api.Request = _original_Request
        pywikibot.Claim.TARGET_CONVERTER['commonsMedia'] = self.old_convert
class CacheInfoMixin(TestCaseBase):

    """Report cache hits and misses."""

    def setUp(self):
        """Snapshot the global cache counters before the test runs."""
        super(CacheInfoMixin, self).setUp()
        self.cache_misses_start = tests.cache_misses
        self.cache_hits_start = tests.cache_hits

    def tearDown(self):
        """Compute per-test cache statistics and print them."""
        self.cache_misses = tests.cache_misses - self.cache_misses_start
        self.cache_hits = tests.cache_hits - self.cache_hits_start
        if self.cache_misses:
            print(' %d cache misses' % self.cache_misses, end=' ')
        if self.cache_hits:
            print(' %d cache hits' % self.cache_hits, end=' ')
        # Only flush when something was actually printed.
        if self.cache_misses or self.cache_hits:
            sys.stdout.flush()
        super(CacheInfoMixin, self).tearDown()
class CheckHostnameMixin(TestCaseBase):

    """Check the hostname is online before running tests."""

    # Class-level cache: hostname -> True (reachable), False, or the
    # Exception raised while probing, so each host is checked only once
    # per test run.
    _checked_hostnames = {}

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Prevent tests running if the host is down.

        @raises unittest.SkipTest: the hostname is unreachable (possibly
            cached from an earlier test class).
        """
        super(CheckHostnameMixin, cls).setUpClass()
        if not hasattr(cls, 'sites'):
            return
        for key, data in cls.sites.items():
            if 'hostname' not in data:
                raise Exception('%s: hostname not defined for %s'
                                % (cls.__name__, key))
            hostname = data['hostname']
            if hostname in cls._checked_hostnames:
                # Reuse the cached probe result instead of fetching again.
                if isinstance(cls._checked_hostnames[hostname], Exception):
                    raise unittest.SkipTest(
                        '%s: hostname %s failed (cached): %s'
                        % (cls.__name__, hostname,
                           cls._checked_hostnames[hostname]))
                elif cls._checked_hostnames[hostname] is False:
                    raise unittest.SkipTest('%s: hostname %s failed (cached)'
                                            % (cls.__name__, hostname))
                else:
                    continue
            e = None
            try:
                # Build the probe URL separately so the cache is keyed by
                # the declared hostname; the original mutated `hostname`
                # here, storing results under a 'http://'-prefixed key
                # that later cache lookups could never hit.
                if '://' not in hostname:
                    url = 'http://' + hostname
                else:
                    url = hostname
                r = http.fetch(uri=url,
                               default_error_handling=False)
                if r.exception:
                    e = r.exception
                else:
                    # Accept OK plus the common redirect status codes.
                    if r.status not in {200, 301, 302, 303, 307, 308}:
                        raise ServerError('HTTP status: %d' % r.status)
                    r.content  # default decode may raise exception
            except Exception as e2:
                pywikibot.error('%s: accessing %s caused exception:'
                                % (cls.__name__, hostname))
                pywikibot.exception(e2, tb=True)
                e = e2
            if e:
                # Cache the failure so later test classes skip quickly.
                cls._checked_hostnames[hostname] = e
                raise unittest.SkipTest(
                    '%s: hostname %s failed: %s'
                    % (cls.__name__, hostname, e))
            cls._checked_hostnames[hostname] = True
class SiteWriteMixin(TestCaseBase):

    """
    Test cases involving writing to the server.

    When editing, the API should not be patched to use
    CachedRequest.  This class prevents that.
    """

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Reject write test classes configured with non-test wikis, or caching.

        Prevent test classes from writing to the site by default.

        If class attribute 'write' is -1, the test class is skipped unless
        environment variable PYWIKIBOT2_TEST_WRITE_FAIL is set to 1.

        Otherwise the test class is skipped unless environment variable
        PYWIKIBOT2_TEST_WRITE is set to 1.
        """
        super(SiteWriteMixin, cls).setUpClass()
        site = cls.get_site()
        # Only the test wiki may ever be written to by the test suite.
        assert 'test' in (site.family.name, site.code), \
            '%s should only write to the test wiki' % cls.__name__
        if cls.write == -1:
            env_var = 'PYWIKIBOT2_TEST_WRITE_FAIL'
        else:
            env_var = 'PYWIKIBOT2_TEST_WRITE'
        if os.environ.get(env_var, '0') != '1':
            raise unittest.SkipTest(
                '%r write tests disabled. '
                'Set %s=1 to enable.'
                % (cls.__name__, env_var))
        # Caching responses would defeat the purpose of a write test.
        # The original message referred to a nonexistent 'SiteEditTestCase';
        # name the actual mixin instead.
        if issubclass(cls, ForceCacheMixin):
            raise Exception(
                '%s can not be a subclass of both '
                'SiteWriteMixin and ForceCacheMixin'
                % cls.__name__)
class RequireUserMixin(TestCaseBase):

    """Run tests against a specific site, with a login."""

    # Flag read by MetaTestCaseClass; its truthiness pulls this mixin in.
    user = True

    @classmethod
    def require_site_user(cls, family, code, sysop=False):
        """Check the user config has a valid login to the site.

        @raises unittest.SkipTest: no (sysop) username is configured
            for the given family/code.
        """
        if not cls.has_site_user(family, code, sysop=sysop):
            raise unittest.SkipTest(
                '%s: No %susername for %s:%s'
                % (cls.__name__,
                   "sysop " if sysop else "",
                   family, code))

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Skip the test class if the user config does not have
        a valid login to the site.
        """
        super(RequireUserMixin, cls).setUpClass()
        sysop = hasattr(cls, 'sysop') and cls.sysop
        for site in cls.sites.values():
            cls.require_site_user(site['family'], site['code'], sysop)
            try:
                site['site'].login(sysop)
            except NoUsername:
                # Handled below: user() will be empty and the class skipped.
                pass
            if not site['site'].user():
                raise unittest.SkipTest(
                    '%s: Not able to login to %s as %s'
                    % (cls.__name__,
                       'sysop' if sysop else 'bot',
                       site['site']))

    def setUp(self):
        """
        Set up the test case.

        Login to the site if it is not logged in.
        """
        super(RequireUserMixin, self).setUp()
        sysop = hasattr(self, 'sysop') and self.sysop
        # There may be many sites, and setUp doesnt know
        # which site is to be tested; ensure they are all
        # logged in.
        for site in self.sites.values():
            site = site['site']
            if not site.logged_in(sysop):
                site.login(sysop)

    def get_userpage(self, site=None):
        """Create a User object for the user's userpage."""
        if not site:
            site = self.get_site()
        if hasattr(self, '_userpage'):
            # For multi-site test classes, or site is specified as a param,
            # the cached userpage object may not be the desired site.
            if self._userpage.site == site:
                return self._userpage
        sysop = hasattr(self, 'sysop') and self.sysop
        userpage = pywikibot.User(site, site.username(sysop))
        # Cache for subsequent calls against the same site.
        self._userpage = userpage
        return userpage
class MetaTestCaseClass(type):

    """Test meta class.

    Rewrites each concrete test class at creation time:
    - mixes in the helper classes implied by class attributes
      ('site', 'dry', 'cached', 'cacheinfo', 'user', 'sysop', 'write'),
    - expands each two-argument test method into one concrete test
      method per declared site.
    """

    def __new__(cls, name, bases, dct):
        """Create the new class."""
        def wrap_method(key, sitedata, func):
            # Bind a multi-site test method to one specific site key.
            def wrapped_method(self):
                sitedata = self.sites[key]
                self.site_key = key
                self.family = sitedata['family']
                self.code = sitedata['code']
                self.site = sitedata['site']
                func(self, key)

            sitename = sitedata['family'] + ':' + sitedata['code']
            # Derive a per-site docstring from the original method's.
            if func.__doc__:
                if func.__doc__.endswith('.'):
                    wrapped_method.__doc__ = func.__doc__[:-1]
                else:
                    wrapped_method.__doc__ = func.__doc__
                wrapped_method.__doc__ += ' on ' + sitename
            else:
                wrapped_method.__doc__ = 'Test ' + sitename

            return wrapped_method

        tests = [attr_name
                 for attr_name in dct
                 if attr_name.startswith('test')]

        dct['abstract_class'] = len(tests) == 0

        # Bail out if it is the abstract class.
        if dct['abstract_class']:
            return super(MetaTestCaseClass, cls).__new__(cls, name, bases, dct)

        # Inherit superclass attributes
        for base in bases:
            for key in ('pwb', 'net', 'site', 'user', 'sysop', 'write',
                        'sites', 'family', 'code', 'dry', 'hostname',
                        'hostnames', 'cached', 'cacheinfo', 'wikibase'):
                if hasattr(base, key) and key not in dct:
                    # print('%s has %s; copying to %s'
                    #       % (base.__name__, key, name))
                    dct[key] = getattr(base, key)

        # Will be inserted into dct[sites] later
        if 'hostname' in dct:
            hostnames = [dct['hostname']]
            del dct['hostname']
        elif 'hostnames' in dct:
            hostnames = dct['hostnames']
        else:
            hostnames = []

        if 'net' in dct and dct['net'] is False:
            dct['site'] = False

        if 'sites' in dct and 'site' not in dct:
            dct['site'] = True

        # If either are specified, assume both should be specified
        if 'family' in dct or 'code' in dct:
            dct['site'] = True

        if (('sites' not in dct or not len(dct['sites'])) and
                'family' in dct and
                'code' in dct and dct['code'] != '*'):
            # Add entry to self.sites
            dct['sites'] = {
                str(dct['family'] + ':' + dct['code']): {
                    'code': dct['code'],
                    'family': dct['family'],
                }
            }

        if hostnames:
            # Hostname-only entries get a minimal site record.
            if 'sites' not in dct:
                dct['sites'] = {}
            for hostname in hostnames:
                assert hostname not in dct['sites']
                dct['sites'][hostname] = {'hostname': hostname}

        if 'dry' in dct and dct['dry'] is True:
            dct['net'] = False

        if (('sites' not in dct and 'site' not in dct) or
                ('site' in dct and not dct['site'])):
            # Prevent use of pywikibot.Site
            bases = tuple([DisableSiteMixin] + list(bases))

            # 'pwb' tests will _usually_ require a site.  To ensure the
            # test class dependencies are declarative, this requires the
            # test writer explicitly sets 'site=False' so code reviewers
            # check that the script invoked by pwb will not load a site.
            if 'pwb' in dct and dct['pwb']:
                if 'site' not in dct:
                    raise Exception(
                        '%s: Test classes using pwb must set "site"; add '
                        'site=False if the test script will not use a site'
                        % name)

            # If the 'site' attribute is a false value,
            # remove it so it matches !site in nose.
            if 'site' in dct:
                del dct['site']

            # If there isn't a site, require declaration of net activity.
            if 'net' not in dct:
                raise Exception(
                    '%s: Test classes without a site configured must set "net"'
                    % name)

            # If the 'net' attribute is a false value,
            # remove it so it matches !net in nose.
            if not dct['net']:
                del dct['net']

            return super(MetaTestCaseClass, cls).__new__(cls, name, bases, dct)

        # The following section is only processed if the test uses sites.
        if 'dry' in dct and dct['dry']:
            bases = tuple([DisconnectedSiteMixin] + list(bases))
            del dct['net']
        else:
            dct['net'] = True

        if 'cacheinfo' in dct and dct['cacheinfo']:
            bases = tuple([CacheInfoMixin] + list(bases))

        if 'cached' in dct and dct['cached']:
            bases = tuple([ForceCacheMixin] + list(bases))

        # Every site-using test class verifies hostname reachability.
        bases = tuple([CheckHostnameMixin] + list(bases))

        if 'write' in dct and dct['write']:
            if 'user' not in dct:
                dct['user'] = True
            bases = tuple([SiteWriteMixin] + list(bases))

        if ('user' in dct and dct['user']) or ('sysop' in dct and dct['sysop']):
            bases = tuple([RequireUserMixin] + list(bases))

        for test in tests:
            test_func = dct[test]

            # method decorated with unittest.expectedFailure has no arguments
            # so it is assumed to not be a multi-site test method.
            if test_func.__code__.co_argcount == 0:
                continue

            # a normal test method only accepts 'self'
            if test_func.__code__.co_argcount == 1:
                continue

            # a multi-site test method only accepts 'self' and the site-key
            if test_func.__code__.co_argcount != 2:
                raise Exception(
                    '%s: Test method %s must accept either 1 or 2 arguments; '
                    ' %d found'
                    % (name, test, test_func.__code__.co_argcount))

            # create test methods processed by unittest
            for (key, sitedata) in dct['sites'].items():
                test_name = test + '_' + key

                dct[test_name] = wrap_method(key, sitedata, dct[test])

                if key in dct.get('expected_failures', []):
                    dct[test_name] = unittest.expectedFailure(dct[test_name])

            # The unexpanded method must not be run by unittest.
            del dct[test]

        return super(MetaTestCaseClass, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestCase(TestTimerMixin, TestLoggingMixin, TestCaseBase):

    """Run tests on pre-defined sites."""

    # Processed by the add_metaclass decorator (Py2/Py3 compatibility).
    __metaclass__ = MetaTestCaseClass

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Prefetch the Site object for each of the sites the test
        class has declared are needed.
        """
        super(TestCase, cls).setUpClass()

        if not hasattr(cls, 'sites'):
            return

        # This stores the site under the site name.
        if not cls.sites:
            cls.sites = {}

        # If the test is not cached, create new Site objects for this class
        if not hasattr(cls, 'cached') or not cls.cached:
            # Swap out the global site cache so uncached tests get
            # freshly constructed Site objects; restored below.
            orig_sites = pywikibot._sites
            pywikibot._sites = {}

        interface = None  # defaults to 'APISite'
        dry = hasattr(cls, 'dry') and cls.dry
        if dry:
            interface = DrySite

        for data in cls.sites.values():
            if ('code' in data and data['code'] in ('test', 'mediawiki') and
                    'PYWIKIBOT2_TEST_PROD_ONLY' in os.environ and not dry):
                raise unittest.SkipTest(
                    'Site code "%s" and PYWIKIBOT2_TEST_PROD_ONLY is set.'
                    % data['code'])

            if 'site' not in data and 'code' in data and 'family' in data:
                data['site'] = Site(data['code'], data['family'],
                                    interface=interface)
            if 'hostname' not in data and 'site' in data:
                try:
                    data['hostname'] = data['site'].hostname()
                except KeyError:
                    # The family has defined this as obsolete
                    # without a mapping to a hostname.
                    pass

        if not hasattr(cls, 'cached') or not cls.cached:
            # Restore the global site cache swapped out above.
            pywikibot._sites = orig_sites

        # Single-site classes also expose the site as cls.site.
        if len(cls.sites) == 1:
            key = next(iter(cls.sites.keys()))
            if 'site' in cls.sites[key]:
                cls.site = cls.sites[key]['site']

    @classmethod
    def get_site(cls, name=None):
        """Return the prefetched Site object.

        @param name: site key declared in cls.sites; may be omitted
            only when a single site is declared.
        @raises Exception: name omitted with multiple sites, or name
            not declared in cls.sites.
        """
        if not name and hasattr(cls, 'sites'):
            if len(cls.sites) == 1:
                name = next(iter(cls.sites.keys()))
            else:
                raise Exception(
                    '"%s.get_site(name=None)" called with multiple sites'
                    % cls.__name__)

        if name and name not in cls.sites:
            raise Exception('"%s" not declared in %s'
                            % (name, cls.__name__))

        if isinstance(cls.site, BaseSite):
            assert cls.sites[name]['site'] == cls.site
            return cls.site

        return cls.sites[name]['site']

    @classmethod
    def has_site_user(cls, family, code, sysop=False):
        """Check the user config has a user for the site."""
        if not family:
            raise Exception('no family defined for %s' % cls.__name__)
        if not code:
            raise Exception('no site code defined for %s' % cls.__name__)

        usernames = config.sysopnames if sysop else config.usernames

        return code in usernames[family] or '*' in usernames[family]

    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(TestCase, self).__init__(*args, **kwargs)

        if not hasattr(self, 'sites'):
            return

        # Create an instance method named the same as the class method
        self.get_site = lambda name=None: self.__class__.get_site(name)

    def get_mainpage(self, site=None, force=False):
        """Create a Page object for the sites main page.

        @param site: Override current site, obtained using L{get_site}.
        @type site: APISite or None
        @param force: Get an unused Page object
        @type force: bool
        @rtype: Page
        """
        if not site:
            site = self.get_site()

        if hasattr(self, '_mainpage') and not force:
            # For multi-site test classes, or site is specified as a param,
            # the cached mainpage object may not be the desired site.
            if self._mainpage.site == site:
                return self._mainpage

        mainpage = pywikibot.Page(site, site.siteinfo['mainpage'])
        if mainpage.isRedirectPage():
            mainpage = mainpage.getRedirectTarget()

        if force:
            # Build a fresh, unused Page object for the same title.
            mainpage = pywikibot.Page(self.site, mainpage.title())

        self._mainpage = mainpage

        return mainpage

    def get_missing_article(self, site=None):
        """Get a Page which refers to a missing page on the site.

        @raises unittest.SkipTest: the placeholder title unexpectedly exists.
        """
        if not site:
            site = self.get_site()
        page = pywikibot.Page(pywikibot.page.Link(
            "There is no page with this title", site))
        if page.exists():
            raise unittest.SkipTest("Did not find a page that does not exist.")

        return page
class CapturingTestCase(TestCase):

    """
    Capture assertion calls to do additional calls around them.

    All assertions done which start with "assert" are patched in such a way
    that after the assertion it calls C{process_assertion} with the assertion
    and the arguments.

    To avoid that it patches the assertion it's possible to put the call in an
    C{disable_assert_capture} with-statement.
    """

    # Is True while an assertion is running, so that assertions won't be
    # patched when they are executed while an assertion is running and only
    # the outer most assertion gets actually patched.
    _patched = False

    @contextmanager
    def disable_assert_capture(self):
        """A context manager which prevents that assertions are patched."""
        nested = self._patched  # Don't reset if it was set before
        self._patched = True
        yield
        if not nested:
            self._patched = False

    def process_assert(self, assertion, *args, **kwargs):
        """Handle the assertion.

        Subclasses override this to wrap extra behaviour around the call.
        """
        assertion(*args, **kwargs)

    def patch_assert(self, assertion):
        """Execute process_assert when the assertion is called."""
        def inner_assert(*args, **kwargs):
            # Only the outermost assertion may be patched at a time.
            assert self._patched is False
            self._patched = True
            try:
                self.process_assert(assertion, *args, **kwargs)
            finally:
                self._patched = False
        return inner_assert

    def __getattribute__(self, attr):
        """Patch assertions if enabled.

        Intercepts every attribute access whose name starts with
        'assert' and returns a wrapped callable instead.
        """
        result = super(CapturingTestCase, self).__getattribute__(attr)
        if attr.startswith('assert') and not self._patched:
            return self.patch_assert(result)
        else:
            return result
class SiteAttributeTestCase(TestCase):

    """Add the sites as attributes to the instances."""

    @classmethod
    def setUpClass(cls):
        """Expose each initialized site as a class attribute."""
        super(SiteAttributeTestCase, cls).setUpClass()
        for site_key, data in cls.sites.items():
            if 'site' in data:
                setattr(cls, site_key, data['site'])
class DefaultSiteTestCase(TestCase):

    """Run tests against the config specified site."""

    family = config.family
    code = config.mylang

    @classmethod
    def override_default_site(cls, site):
        """
        Override the default site.

        @param site: site tests should use
        @type site: BaseSite
        """
        print('%s using %s instead of %s:%s.'
              % (cls.__name__, site, cls.family, cls.code))
        cls.site = site
        cls.family = site.family.name
        cls.code = site.code
        # Rebuild the sites mapping so it describes only the override.
        cls.sites = {
            cls.site: {
                'family': cls.family,
                'code': cls.code,
                'site': cls.site,
                'hostname': cls.site.hostname(),
            }
        }
class AlteredDefaultSiteTestCase(TestCase):

    """Save and restore the config.mylang and config.family."""

    def setUp(self):
        """Remember the configured family and language before the test."""
        self.original_family = pywikibot.config.family
        self.original_code = pywikibot.config.mylang
        super(AlteredDefaultSiteTestCase, self).setUp()

    def tearDown(self):
        """Reinstate the remembered family and language."""
        pywikibot.config.family = self.original_family
        pywikibot.config.mylang = self.original_code
        super(AlteredDefaultSiteTestCase, self).tearDown()
class ScenarioDefinedDefaultSiteTestCase(AlteredDefaultSiteTestCase):

    """Tests that depend on the default site being set to the test site."""

    def setUp(self):
        """Point config.family and config.mylang at the scenario's site."""
        super(ScenarioDefinedDefaultSiteTestCase, self).setUp()
        scenario_site = self.get_site()
        pywikibot.config.family = scenario_site.family
        pywikibot.config.mylang = scenario_site.code
class DefaultDrySiteTestCase(DefaultSiteTestCase):

    """Run tests using the config specified site in offline mode."""

    # Picked up by MetaTestCaseClass, which mixes in DisconnectedSiteMixin.
    dry = True
class WikimediaSiteTestCase(TestCase):

    """Test class uses only WMF sites."""

    # Restrict the usable sites to those run by the Wikimedia Foundation.
    wmf = True
class WikimediaDefaultSiteTestCase(DefaultSiteTestCase, WikimediaSiteTestCase):

    """Test class to run against a WMF site, preferring the default site."""

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Check that the default site is a Wikimedia site.
        Use en.wikipedia.org as a fallback.
        """
        super(WikimediaDefaultSiteTestCase, cls).setUpClass()
        assert hasattr(cls, 'site') and hasattr(cls, 'sites')
        assert len(cls.sites) == 1
        default = cls.get_site()
        if not isinstance(default.family, WikimediaFamily):
            # Configured default is not WMF-run; fall back to enwiki.
            cls.override_default_site(pywikibot.Site('en', 'wikipedia'))
class WikibaseTestCase(TestCase):

    """Run tests against a wikibase site."""

    # Flag read by MetaTestCaseClass.
    wikibase = True

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Checks that all sites are configured with a Wikibase repository,
        with Site.has_data_repository() returning True, and all sites
        use the same data repository.
        """
        super(WikibaseTestCase, cls).setUpClass()
        for data in cls.sites.values():
            if 'site' not in data:
                continue

            site = data['site']
            # NOTE(review): accessed as an attribute, not called -- if
            # has_data_repository is a plain method this condition is
            # always truthy; confirm it is a property on the Site class.
            if not site.has_data_repository:
                raise unittest.SkipTest(
                    u'%s: %r does not have data repository'
                    % (cls.__name__, site))

            # All declared sites must share one data repository.
            if (hasattr(cls, 'repo') and
                    cls.repo != site.data_repository()):
                raise Exception(
                    '%s: sites do not all have the same data repository'
                    % cls.__name__)

            cls.repo = site.data_repository()

    @classmethod
    def get_repo(cls):
        """Return the prefetched DataSite object."""
        return cls.repo

    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(WikibaseTestCase, self).__init__(*args, **kwargs)

        if not hasattr(self, 'sites'):
            return

        # Create an instance method named the same as the class method
        self.get_repo = lambda: self.repo
class WikibaseClientTestCase(WikibaseTestCase):

    """Run tests against a specific site connected to a wikibase."""

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Checks that all sites are configured as a Wikibase client,
        with Site.has_transcluded_data() returning True.
        """
        super(WikibaseClientTestCase, cls).setUpClass()
        for data in cls.sites.values():
            client = data['site']
            if not client.has_transcluded_data:
                raise unittest.SkipTest(
                    u'%s: %r does not have transcluded data'
                    % (cls.__name__, client))
class DefaultWikibaseClientTestCase(WikibaseClientTestCase,
                                    DefaultSiteTestCase):

    """Run tests against any site connected to a Wikibase."""

    pass
class WikidataTestCase(WikibaseTestCase):

    """Test cases use Wikidata."""

    family = 'wikidata'
    code = 'wikidata'

    # Responses may be cached aggressively via ForceCacheMixin.
    cached = True
class DefaultWikidataClientTestCase(DefaultWikibaseClientTestCase):

    """Run tests against any site connected to Wikidata."""

    @classmethod
    def setUpClass(cls):
        """
        Set up the test class.

        Require the data repository is wikidata.org.

        @raises unittest.SkipTest: the site's data repository is not
            wikidata.org.
        """
        # Anchor super() at this class so cooperative setUpClass runs for
        # every parent; the original anchored at WikibaseClientTestCase,
        # silently skipping that class's transcluded-data checks.
        super(DefaultWikidataClientTestCase, cls).setUpClass()

        if str(cls.get_repo()) != 'wikidata:wikidata':
            raise unittest.SkipTest(
                u'%s: %s is not connected to Wikidata.'
                % (cls.__name__, cls.get_site()))
class ScriptMainTestCase(ScenarioDefinedDefaultSiteTestCase):

    """Test running a script main()."""

    pass
class PwbTestCase(TestCase):

    """
    Test cases use pwb.py to invoke scripts.

    Test cases which use pwb typically also access a site, and use the
    network.  Even during initialisation, scripts may call
    pywikibot.handle_args, which initialises loggers and uses the network
    to determine if the code is stale.

    The flag 'pwb' is used by the TestCase metaclass to check that a test
    site is set declared in the class properties, or that 'site = False' is
    added to the class properties in the unlikely scenario that the test
    case uses pwb in a way that doesnt use a site.

    If a test class is marked as 'site = False', the metaclass will also
    check that the 'net' flag is explicitly set.
    """

    pwb = True

    def setUp(self):
        """Export PYWIKIBOT2_DIR so the spawned pwb.py finds the config."""
        super(PwbTestCase, self).setUp()
        # None means the variable was not set before this test.
        self.orig_pywikibot_dir = os.environ.get('PYWIKIBOT2_DIR')
        base_dir = pywikibot.config.base_dir
        if sys.platform == 'win32' and sys.version_info[0] < 3:
            # os.environ on win32/py2 requires native byte strings.
            base_dir = str(base_dir)
        os.environ[str('PYWIKIBOT2_DIR')] = base_dir

    def tearDown(self):
        """Remove, or restore, the PYWIKIBOT2_DIR environment variable."""
        super(PwbTestCase, self).tearDown()
        del os.environ['PYWIKIBOT2_DIR']
        if self.orig_pywikibot_dir:
            os.environ[str('PYWIKIBOT2_DIR')] = self.orig_pywikibot_dir

    def _execute(self, args, data_in=None, timeout=0, error=None):
        """Run pwb.py with args plus the test site's family and code."""
        site = self.get_site()
        args = args + ['-family:' + site.family.name,
                       '-code:' + site.code]
        return execute_pwb(args, data_in, timeout, error)
class RecentChangesTestCase(WikimediaDefaultSiteTestCase):

    """Test cases for tests that use recent change."""

    # site.recentchanges() includes external edits from wikidata,
    # except on wiktionaries which are not linked to wikidata
    # so total=3 should not be too high for most sites.
    length = 3

    @classmethod
    def setUpClass(cls):
        """Set up test class, skipping entirely when RC tests are off."""
        if os.environ.get('PYWIKIBOT2_TEST_NO_RC', '0') == '1':
            raise unittest.SkipTest('RecentChanges tests disabled.')

        super(RecentChangesTestCase, cls).setUpClass()

        if cls.get_site().code == 'test':
            # The test wiki has too little RC traffic; use enwiki instead.
            cls.override_default_site(pywikibot.Site('en', 'wikipedia'))
class DebugOnlyTestCase(TestCase):

    """Test cases that only operate in debug mode."""

    @classmethod
    def setUpClass(cls):
        """Skip the class when Python runs with -O (__debug__ False)."""
        if not __debug__:
            raise unittest.SkipTest(
                '%s is disabled when __debug__ is disabled.' % cls.__name__)
        super(DebugOnlyTestCase, cls).setUpClass()
class DeprecationTestCase(DebugOnlyTestCase, TestCase):

    """Test cases for deprecation function in the tools module."""

    # Matches any deprecation message, with or without an 'instead' clause;
    # group(1) is present iff an alternative was supplied.
    _generic_match = re.compile(r'.* is deprecated(; use .* instead)?\.')

    # Frames belonging to these callables are skipped when locating the
    # source of a captured warning.
    skip_list = [
        unittest.case._AssertRaisesContext,
        TestCase.assertRaises,
        TestCase.assertRaisesRegex,
        TestCase.assertRaisesRegexp,
    ]

    # Require no instead string
    NO_INSTEAD = object()
    # Require an instead string
    INSTEAD = object()

    # Python 3 component in the call stack of _AssertRaisesContext
    if hasattr(unittest.case, '_AssertRaisesBaseContext'):
        skip_list.append(unittest.case._AssertRaisesBaseContext)

    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(DeprecationTestCase, self).__init__(*args, **kwargs)
        self.warning_log = []

        self.expect_warning_filename = inspect.getfile(self.__class__)
        if self.expect_warning_filename.endswith((".pyc", ".pyo")):
            # Warnings carry the source (.py) filename, not the bytecode's.
            self.expect_warning_filename = self.expect_warning_filename[:-1]

        self._do_test_warning_filename = True
        self._ignore_unknown_warning_packages = False

        self.context_manager = WarningSourceSkipContextManager(self.skip_list)

    def _reset_messages(self):
        """Reset captured deprecation warnings."""
        self._do_test_warning_filename = True
        del self.warning_log[:]

    @property
    def deprecation_messages(self):
        """Return captured deprecation warnings as plain strings."""
        messages = [str(item.message) for item in self.warning_log]
        return messages

    @classmethod
    def _build_message(cls, deprecated, instead):
        """Build the expected message, or a sentinel, for the assertions.

        @param deprecated: deprecated name, or None for a generic match.
        @param instead: replacement name; when deprecated is None,
            True/False select the INSTEAD/NO_INSTEAD sentinels.
        @return: full message string, None, INSTEAD or NO_INSTEAD.
        """
        if deprecated is None:
            if instead is None:
                msg = None
            elif instead is True:
                msg = cls.INSTEAD
            else:
                assert instead is False
                msg = cls.NO_INSTEAD
        else:
            msg = '{0} is deprecated'.format(deprecated)
            if instead:
                msg += '; use {0} instead'.format(instead)
            msg += '.'
        return msg

    def assertDeprecationParts(self, deprecated=None, instead=None):
        """
        Assert that a deprecation warning happened.

        To simplify deprecation tests it just requires the to separated parts
        and forwards the result to L{assertDeprecation}.

        @param deprecated: The deprecated string. If None it uses a generic
            match depending on instead.
        @type deprecated: str or None
        @param instead: The instead string unless deprecated is None. If it's
            None it allows any generic deprecation string, on True only those
            where instead string is present and on False only those where it's
            missing. If the deprecation string is not None, no instead string
            is expected when instead evaluates to False.
        @type instead: str or None or True or False
        """
        self.assertDeprecation(self._build_message(deprecated, instead))

    def assertDeprecation(self, msg=None):
        """
        Assert that a deprecation warning happened.

        @param msg: Either the specific message or None to allow any generic
            message. When set to C{INSTEAD} it only counts those supplying an
            alternative and when C{NO_INSTEAD} only those not supplying one.
        @type msg: string or None or INSTEAD or NO_INSTEAD
        """
        if msg is None or msg is self.INSTEAD or msg is self.NO_INSTEAD:
            deprecation_messages = self.deprecation_messages
            # Succeed as soon as any captured message satisfies the
            # generic pattern (and the instead-clause requirement).
            for deprecation_message in deprecation_messages:
                match = self._generic_match.match(deprecation_message)
                if (match and bool(match.group(1)) == (msg is self.INSTEAD) or
                        msg is None):
                    break
            else:
                self.fail('No generic deprecation message match found in '
                          '{0}'.format(deprecation_messages))
        else:
            self.assertIn(msg, self.deprecation_messages)
        if self._do_test_warning_filename:
            self.assertDeprecationFile(self.expect_warning_filename)

    def assertOneDeprecationParts(self, deprecated=None, instead=None, count=1):
        """
        Assert that exactly one deprecation message happened and reset.

        It uses the same arguments as L{assertDeprecationParts}.
        """
        self.assertOneDeprecation(self._build_message(deprecated, instead),
                                  count)

    def assertOneDeprecation(self, msg=None, count=1):
        """Assert that exactly one deprecation message happened and reset."""
        self.assertDeprecation(msg)
        # This is doing such a weird structure, so that it shows any other
        # deprecation message from the set.
        self.assertCountEqual(set(self.deprecation_messages),
                              [self.deprecation_messages[0]])
        self.assertEqual(len(self.deprecation_messages), count)
        self._reset_messages()

    def assertNoDeprecation(self, msg=None):
        """Assert that no deprecation warning happened."""
        if msg:
            self.assertNotIn(msg, self.deprecation_messages)
        else:
            self.assertEqual([], self.deprecation_messages)

    def assertDeprecationClass(self, cls):
        """Assert that all deprecation warning are of one class."""
        self.assertTrue(all(isinstance(item.message, cls)
                            for item in self.warning_log))

    def assertDeprecationFile(self, filename):
        """Assert that all deprecation warning are of one filename."""
        for item in self.warning_log:
            if (self._ignore_unknown_warning_packages and
                    'pywikibot' not in item.filename):
                # Warning originated outside pywikibot; ignore by request.
                continue

            if item.filename != filename:
                self.fail(
                    'expected warning filename %s; warning item: %s'
                    % (filename, item))

    def setUp(self):
        """Set up unit test."""
        super(DeprecationTestCase, self).setUp()

        # Start capturing warnings into self.warning_log.
        self.warning_log = self.context_manager.__enter__()
        warnings.simplefilter("always")

        self._reset_messages()

    def tearDown(self):
        """Tear down unit test."""
        self.context_manager.__exit__()

        super(DeprecationTestCase, self).tearDown()
class AutoDeprecationTestCase(CapturingTestCase, DeprecationTestCase):

    """
    A test case capturing asserts and asserting a deprecation afterwards.

    For example C{assertEqual} will do first C{assertEqual} and then
    C{assertOneDeprecation}.
    """

    def process_assert(self, assertion, *args, **kwargs):
        """Handle assertion and call C{assertOneDeprecation} after it."""
        super(AutoDeprecationTestCase, self).process_assert(
            assertion, *args, **kwargs)
        self.assertOneDeprecation()

    # Also skip the capture wrappers themselves when locating the
    # frame that triggered a warning.
    skip_list = DeprecationTestCase.skip_list + [
        CapturingTestCase.process_assert,
        CapturingTestCase.patch_assert,
        process_assert,
    ]
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Procurement Model
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ProcurementModel",
"proc_rheader"
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3ProcurementModel(S3Model):
    """
        Procurement

        A module to handle Procurement

        Currently handles:
            Planned Procurements

        @ToDo: Extend to
               Purchase Requests (PR)
               Requests for Quotation (RFQ)
               Competitive Bid Analysis (CBA)
               Purchase Orders (PO)
    """

    names = ["proc_plan",
             "proc_plan_item"
             ]

    def model(self):
        """
            Define the proc_plan and proc_plan_item tables and their
            CRUD strings, components and filter methods.

            @return: dict of names to pass back to global scope (s3.*)
        """

        T = current.T
        db = current.db
        auth = current.auth
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        messages = current.messages
        configure = self.configure

        # =====================================================================
        # Planned Procurements
        #
        proc_shipping_opts = {0: messages["NONE"],
                              1: T("Air"),
                              2: T("Rail"),
                              3: T("Road"),
                              4: T("Sea")
                              }

        tablename = "proc_plan"
        define_table(tablename,
                     self.super_link("site_id", "org_site",
                                     #label = T("Inventory"),
                                     label = T("Office"),
                                     default = auth.user.site_id if auth.is_logged_in() else None,
                                     readable = True,
                                     writable = True,
                                     empty = False,
                                     # Comment these to use a Dropdown & not an Autocomplete
                                     #widget = S3SiteAutocompleteWidget(),
                                     #comment = DIV(_class="tooltip",
                                     #              _title="%s|%s" % (T("Inventory"),
                                     #                                messages.AUTOCOMPLETE_HELP)),
                                     represent=self.org_site_represent),
                     # @ToDo: Link the Plan to a Project or Activity (if that module is enabled)
                     #project_id(),
                     s3_date("order_date",
                             label = T("Order Date")
                             ),
                     s3_date("eta",
                             label = T("Date Expected"),
                             ),
                     # @ToDo: Do we want more than 1 supplier per Plan?
                     # @ToDo: Filter to orgs of type 'supplier'
                     self.org_organisation_id(label=T("Supplier")),
                     Field("shipping", "integer",
                           requires = IS_EMPTY_OR(IS_IN_SET(proc_shipping_opts)),
                           represent = lambda opt: \
                               proc_shipping_opts.get(opt,
                                                      messages.UNKNOWN_OPT),
                           label = T("Shipping Method"),
                           default = 0,
                           ),
                     # @ToDo: Add estimated shipping costs
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        crud_strings[tablename] = Storage(
            label_create = T("Add Procurement Plan"),
            title_display = T("Procurement Plan Details"),
            title_list = T("Procurement Plans"),
            title_update = T("Edit Procurement Plan"),
            label_list_button = T("List Procurement Plans"),
            label_delete_button = T("Delete Procurement Plan"),
            msg_record_created = T("Procurement Plan added"),
            msg_record_modified = T("Procurement Plan updated"),
            msg_record_deleted = T("Procurement Plan deleted"),
            msg_list_empty = T("No Procurement Plans currently registered"))

        # ---------------------------------------------------------------------
        # Redirect to the Items tabs after creation
        plan_item_url = URL(f="plan", args=["[id]", "plan_item"])
        configure(tablename,
                  # @ToDo: Move these to controller r.interactive?
                  create_next = plan_item_url,
                  update_next = plan_item_url)

        # Reusable foreign key to proc_plan
        # NB the table has no "date" field: sort/order on "order_date"
        plan_id = S3ReusableField("plan_id", "reference %s" % tablename,
                                  sortby="order_date",
                                  requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "proc_plan.id",
                                                          self.proc_plan_represent,
                                                          orderby="proc_plan.order_date",
                                                          sort=True)),
                                  represent = self.proc_plan_represent,
                                  label = T("Procurement Plan"),
                                  ondelete = "CASCADE")

        # Items as a component of Plans
        self.add_components(tablename,
                            proc_plan_item="plan_id",
                            )

        # =====================================================================
        # Procurement Plan Items
        #
        tablename = "proc_plan_item"
        define_table(tablename,
                     plan_id(),
                     self.supply_item_entity_id,
                     self.supply_item_id(),
                     self.supply_item_pack_id(),
                     Field("quantity", "double", notnull = True,
                           label = T("Quantity"),
                           ),
                     # @ToDo: Move this into a Currency Widget
                     #        for the pack_value field
                     s3_currency(readable=False,
                                 writable=False
                                 ),
                     Field("pack_value", "double",
                           readable=False,
                           writable=False,
                           label = T("Value per Pack")),
                     #Field("pack_quantity",
                     #      "double",
                     #      compute = record_pack_quantity), # defined in supply
                     #Field.Method("pack_quantity",
                     #             self.supply_item_pack_quantity(tablename=tablename)),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        crud_strings[tablename] = Storage(
            label_create = T("Add Item to Procurement Plan"),
            title_display = T("Procurement Plan Item Details"),
            title_list = T("Items in Procurement Plan"),
            title_update = T("Edit Procurement Plan Item"),
            label_list_button = T("List Items in Procurement Plan"),
            label_delete_button = T("Remove Item from Procurement Plan"),
            msg_record_created = T("Item added to Procurement Plan"),
            msg_record_modified = T("Procurement Plan Item updated"),
            msg_record_deleted = T("Item removed from Procurement Plan"),
            msg_list_empty = T("No Items currently registered in this Procurement Plan"))

        # ---------------------------------------------------------------------
        # Item Search Method
        #
        filter_widgets = [
            S3TextFilter(["item_id$name",
                          #"item_id$category_id$name",
                          #"plan_id$site_id$name"
                          ],
                         label=T("Search"),
                         comment=T("Search for an item by text."),
                         ),
            S3OptionsFilter("plan_id$organisation_id$name",
                            label=T("Supplier"),
                            comment=T("If none are selected, then all are searched."),
                            cols = 2,
                            hidden = True,
                            ),
            #S3OptionsFilter("plan_id$site_id",
            #                label=T("Facility"),
            #                represent ="%(name)s",
            #                comment=T("If none are selected, then all are searched."),
            #                cols = 2,
            #                hidden = True,
            #                ),
            #S3DateFilter("plan_id$order_date",
            #             label=T("Order Date"),
            #             hidden = True,
            #             ),
            #S3DateFilter("plan_id$eta",
            #             label=T("Date Expected"),
            #             hidden = True,
            #             ),
            ]

        configure(tablename,
                  super_entity = "supply_item_entity",
                  filter_widgets = filter_widgets,
                  #report_groupby = db.proc_plan.site_id,
                  report_hide_comments = True)

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {}

    # -------------------------------------------------------------------------
    @staticmethod
    def proc_plan_represent(id, row=None):
        """
            Represent a Procurement Plan as "<site> (<order date>)"

            @param id: the proc_plan record ID
            @param row: the proc_plan Row (avoids a DB lookup if provided)
        """

        if row:
            table = current.db.proc_plan
        elif not id:
            return current.messages["NONE"]
        else:
            db = current.db
            table = db.proc_plan
            row = db(table.id == id).select(table.site_id,
                                            table.order_date,
                                            limitby=(0, 1)).first()
        try:
            return "%s (%s)" % (table.site_id.represent(row.site_id),
                                table.order_date.represent(row.order_date))
        except AttributeError:
            # Record not found (row is None)
            return current.messages.UNKNOWN_OPT
# =============================================================================
def proc_rheader(r):
    """
        Resource Header for Procurements

        @param r: the S3Request
        @return: the rheader DIV for HTML representation, else None
    """

    if r.representation != "html":
        return None

    plan = r.record
    if not plan:
        return None

    T = current.T
    tabs = [(T("Edit Details"), None),
            (T("Items"), "plan_item"),
            ]
    rheader_tabs = s3_rheader_tabs(r, tabs)

    # One table row per plan attribute, using the field's own
    # label and representation
    table = r.table
    rows = []
    for fname in ("site_id", "order_date", "eta", "shipping"):
        field = table[fname]
        rows.append(TR(TH("%s: " % field.label),
                       field.represent(plan[fname]),
                       ))

    return DIV(TABLE(*rows),
               rheader_tabs
               )
# END =========================================================================
| |
"""The tests for the Owntracks device tracker."""
import asyncio
import json
import unittest
from unittest.mock import patch
from tests.common import (
assert_setup_component, fire_mqtt_message, mock_coro, mock_component,
get_test_home_assistant, mock_mqtt_component)
import homeassistant.components.device_tracker.owntracks as owntracks
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM, STATE_NOT_HOME
from homeassistant.util.async import run_coroutine_threadsafe
# Test user/device fixtures and the MQTT topics OwnTracks publishes on
USER = 'greg'
DEVICE = 'phone'

LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINT_TOPIC = 'owntracks/{}/{}/waypoints'.format(USER, DEVICE)
# User NOT in CONF_WAYPOINT_WHITELIST — waypoints from this topic
# must be ignored by the platform
USER_BLACKLIST = 'ram'
WAYPOINT_TOPIC_BLOCKED = 'owntracks/{}/{}/waypoints'.format(
    USER_BLACKLIST, DEVICE)

# Entity id the device tracker creates for USER/DEVICE
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)

# Mobile iBeacon fixtures (beacons not named after an HA zone
# become their own tracker entities)
IBEACON_DEVICE = 'keys'
MOBILE_BEACON_FMT = 'device_tracker.beacon_{}'

# Platform config option names
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET

# Test zone geometry: degrees-per-metre approximation used to place
# points relative to zone boundaries
TEST_ZONE_LAT = 45.0
TEST_ZONE_LON = 90.0
TEST_ZONE_DEG_PER_M = 0.0000127
FIVE_M = TEST_ZONE_DEG_PER_M * 5.0

# Home Assistant Zones
INNER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT+0.1,
    'longitude': TEST_ZONE_LON+0.1,
    'radius': 50
}

OUTER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT,
    'longitude': TEST_ZONE_LON,
    'radius': 100000
}
def build_message(test_params, default_params):
    """Build a test message from overrides and another message.

    Returns a new dict: *default_params* with *test_params* applied on
    top; neither input is mutated.
    """
    return dict(default_params, **test_params)
# Default message parameters
DEFAULT_LOCATION_MESSAGE = {
    '_type': 'location',
    'lon': OUTER_ZONE['longitude'],
    'lat': OUTER_ZONE['latitude'],
    'acc': 60,
    'tid': 'user',
    't': 'u',
    'batt': 92,
    'cog': 248,
    'alt': 27,
    'p': 101.3977584838867,
    'vac': 4,
    'tst': 1,
    'vel': 0
}

# Owntracks will publish a transition when crossing
# a circular region boundary.
# ZONE_EDGE: latitude offset (in degrees) equal to the inner zone radius
ZONE_EDGE = TEST_ZONE_DEG_PER_M * INNER_ZONE['radius']
DEFAULT_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'c',
    'lon': INNER_ZONE['longitude'],
    'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
    'acc': 60,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}

# iBeacons that are named the same as an HA zone
# are used to trigger enter and leave updates
# for that zone. In this case the "inner" zone.
#
# iBeacons that do not share an HA zone name
# are treated as mobile tracking devices for
# objects which can't track themselves e.g. keys.
#
# iBeacons are typically configured with the
# default lat/lon 0.0/0.0 and have acc 0.0 but
# regardless the reported location is not trusted.
#
# Owntracks will send both a location message
# for the device and an 'event' message for
# the beacon transition.
DEFAULT_BEACON_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'b',
    'lon': 0.0,
    'lat': 0.0,
    'acc': 0.0,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}

# Location messages
LOCATION_MESSAGE = DEFAULT_LOCATION_MESSAGE

# Accurate enough to normally be used, but acc exceeds the
# configured max_gps_accuracy of 200 so it must be ignored
LOCATION_MESSAGE_INACCURATE = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 2000},
    LOCATION_MESSAGE)

# acc == 0 signals "no GPS fix" and must also be ignored
LOCATION_MESSAGE_ZERO_ACCURACY = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 0},
    LOCATION_MESSAGE)

# 2 degrees away from any configured zone
LOCATION_MESSAGE_NOT_HOME = build_message(
    {'lat': OUTER_ZONE['latitude'] - 2.0,
     'lon': INNER_ZONE['longitude'] - 2.0,
     'acc': 100},
    LOCATION_MESSAGE)

# Region GPS messages
REGION_GPS_ENTER_MESSAGE = DEFAULT_TRANSITION_MESSAGE

REGION_GPS_LEAVE_MESSAGE = build_message(
    {'lon': INNER_ZONE['longitude'] - ZONE_EDGE * 10,
     'lat': INNER_ZONE['latitude'] - ZONE_EDGE * 10,
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)

REGION_GPS_ENTER_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_ENTER_MESSAGE)

REGION_GPS_LEAVE_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_LEAVE_MESSAGE)

REGION_GPS_ENTER_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_ENTER_MESSAGE)

REGION_GPS_LEAVE_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_LEAVE_MESSAGE)

REGION_GPS_LEAVE_MESSAGE_OUTER = build_message(
    {'lon': OUTER_ZONE['longitude'] - 2.0,
     'lat': OUTER_ZONE['latitude'] - 2.0,
     'desc': 'outer',
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)

# Region Beacon messages
REGION_BEACON_ENTER_MESSAGE = DEFAULT_BEACON_TRANSITION_MESSAGE

REGION_BEACON_LEAVE_MESSAGE = build_message(
    {'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)

# Mobile Beacon messages ('keys' does not match any HA zone name,
# so it becomes its own device_tracker.beacon_keys entity)
MOBILE_BEACON_ENTER_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE},
    DEFAULT_BEACON_TRANSITION_MESSAGE)

MOBILE_BEACON_LEAVE_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE,
     'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)

# Waypoint messages
WAYPOINTS_EXPORTED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 3,
            "lat": 47,
            "lon": 9,
            "rad": 10,
            "desc": "exp_wayp1"
        },
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 3,
            "lon": 9,
            "rad": 500,
            "desc": "exp_wayp2"
        }
    ]
}

WAYPOINTS_UPDATED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 9,
            "lon": 47,
            "rad": 50,
            "desc": "exp_wayp1"
        },
    ]
}

# Zone entity ids created from imported waypoints
WAYPOINT_ENTITY_NAMES = [
    'zone.greg_phone__exp_wayp1',
    'zone.greg_phone__exp_wayp2',
    'zone.ram_phone__exp_wayp1',
    'zone.ram_phone__exp_wayp2',
]

# Wrappers used to corrupt an otherwise valid JSON payload
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
class BaseMQTT(unittest.TestCase):
    """Base MQTT assert functions."""

    hass = None

    def send_message(self, topic, message, corrupt=False):
        """Publish *message* as JSON on *topic* and wait for processing.

        When corrupt=True the JSON payload is wrapped in junk so the
        platform's error handling can be exercised.
        """
        payload = json.dumps(message)
        if corrupt:
            payload = BAD_JSON_PREFIX + payload + BAD_JSON_SUFFIX
        fire_mqtt_message(self.hass, topic, payload)
        self.hass.block_till_done()

    def _tracker_state(self):
        """Return the current state object of the tracked device."""
        return self.hass.states.get(DEVICE_TRACKER_STATE)

    def assert_location_state(self, location):
        """Assert the tracker's state equals *location*."""
        self.assertEqual(self._tracker_state().state, location)

    def assert_location_latitude(self, latitude):
        """Assert the tracker's latitude attribute."""
        self.assertEqual(self._tracker_state().attributes.get('latitude'),
                         latitude)

    def assert_location_longitude(self, longitude):
        """Assert the tracker's longitude attribute."""
        self.assertEqual(self._tracker_state().attributes.get('longitude'),
                         longitude)

    def assert_location_accuracy(self, accuracy):
        """Assert the tracker's gps_accuracy attribute."""
        self.assertEqual(self._tracker_state().attributes.get('gps_accuracy'),
                         accuracy)
class TestDeviceTrackerOwnTracks(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, _):
    """Setup things to be run when tests are started."""
    self.hass = get_test_home_assistant()
    mock_mqtt_component(self.hass)
    mock_component(self.hass, 'group')
    mock_component(self.hass, 'zone')

    # Prevent config writes from the device tracker during tests
    patcher = patch('homeassistant.components.device_tracker.'
                    'DeviceTracker.async_update_config')
    patcher.start()
    self.addCleanup(patcher.stop)

    orig_context = owntracks.OwnTracksContext

    def store_context(*args):
        # Capture the OwnTracksContext the platform creates so tests
        # can inspect regions_entered / mobile_beacons_active
        self.context = orig_context(*args)
        return self.context

    with patch('homeassistant.components.device_tracker.async_load_config',
               return_value=mock_coro([])), \
            patch('homeassistant.components.device_tracker.'
                  'load_yaml_config_file', return_value=mock_coro({})), \
            patch.object(owntracks, 'OwnTracksContext', store_context), \
            assert_setup_component(1, device_tracker.DOMAIN):
        assert setup_component(self.hass, device_tracker.DOMAIN, {
            device_tracker.DOMAIN: {
                CONF_PLATFORM: 'owntracks',
                CONF_MAX_GPS_ACCURACY: 200,
                CONF_WAYPOINT_IMPORT: True,
                CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
            }})

    # Zones used by the transition tests
    self.hass.states.set(
        'zone.inner', 'zoning', INNER_ZONE)
    self.hass.states.set(
        'zone.inner_2', 'zoning', INNER_ZONE)
    self.hass.states.set(
        'zone.outer', 'zoning', OUTER_ZONE)

    # Clear state between tests
    # NB: state "None" is not a state that is created by Device
    # so when we compare state to None in the tests this
    # is really checking that it is still in its original
    # test case state. See Device.async_update.
    self.hass.states.set(DEVICE_TRACKER_STATE, None)
def teardown_method(self, _):
    """Stop everything that was started."""
    self.hass.stop()
def assert_mobile_tracker_state(self, location, beacon=IBEACON_DEVICE):
    """Assert the state of a mobile beacon tracker entity."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    self.assertEqual(self.hass.states.get(entity_id).state, location)
def assert_mobile_tracker_latitude(self, latitude, beacon=IBEACON_DEVICE):
    """Assert the latitude attribute of a mobile beacon tracker."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    self.assertEqual(
        self.hass.states.get(entity_id).attributes.get('latitude'),
        latitude)
def assert_mobile_tracker_accuracy(self, accuracy, beacon=IBEACON_DEVICE):
    """Assert the gps_accuracy attribute of a mobile beacon tracker."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    self.assertEqual(
        self.hass.states.get(entity_id).attributes.get('gps_accuracy'),
        accuracy)
def test_location_invalid_devid(self):  # pylint: disable=invalid-name
    """Test that a device id with invalid chars is slugified."""
    # 'nexus-5x' contains a '-' which is not valid in an entity id;
    # the platform must slugify it to 'nexus5x'
    self.send_message('owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
    state = self.hass.states.get('device_tracker.paulus_nexus5x')
    assert state.state == 'outer'
def test_location_update(self):
    """Test the update of a location."""
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # Coordinates and accuracy are taken straight from the message
    self.assert_location_latitude(LOCATION_MESSAGE['lat'])
    self.assert_location_accuracy(LOCATION_MESSAGE['acc'])
    self.assert_location_state('outer')
def test_location_inaccurate_gps(self):
    """Test the location for inaccurate GPS information."""
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
    # acc=2000 exceeds the configured max_gps_accuracy of 200
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)

    # Ignored inaccurate GPS. Location remains at previous.
    self.assert_location_latitude(LOCATION_MESSAGE['lat'])
    self.assert_location_longitude(LOCATION_MESSAGE['lon'])
def test_location_zero_accuracy_gps(self):
    """Ignore the location for zero accuracy GPS information."""
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
    # acc=0 means "no GPS fix" and must be discarded
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)

    # Ignored inaccurate GPS. Location remains at previous.
    self.assert_location_latitude(LOCATION_MESSAGE['lat'])
    self.assert_location_longitude(LOCATION_MESSAGE['lon'])
# ------------------------------------------------------------------------
# GPS based event entry / exit testing
def test_event_gps_entry_exit(self):
    """Test GPS-region enter then leave transitions."""
    # Entering the owntrack circular region named "inner"
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # asssociated with the inner zone regardless of GPS.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)

    # Exit switches back to GPS
    self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
    self.assert_location_accuracy(REGION_GPS_LEAVE_MESSAGE['acc'])
    self.assert_location_state('outer')

    # Left clean zone state
    self.assertFalse(self.context.regions_entered[USER])

    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # Now sending a location update moves me again.
    self.assert_location_latitude(LOCATION_MESSAGE['lat'])
    self.assert_location_accuracy(LOCATION_MESSAGE['acc'])
def test_event_gps_with_spaces(self):
    """Test GPS-region transitions for a zone name containing spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_GPS_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner 2')

    message = build_message({'desc': "inner 2"},
                            REGION_GPS_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # Left clean zone state
    self.assertFalse(self.context.regions_entered[USER])
def test_event_gps_entry_inaccurate(self):
    """Test the event for inaccurate entry."""
    # Set location to the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # Enter events use the zone co-ords, so message GPS accuracy
    # does not matter here
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_INACCURATE)

    # I enter the zone even though the message GPS was inaccurate.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')
def test_event_gps_entry_exit_inaccurate(self):
    """Test the event for inaccurate exit."""
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_INACCURATE)

    # Exit doesn't use inaccurate gps
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    # But does exit region correctly
    self.assertFalse(self.context.regions_entered[USER])
def test_event_gps_entry_exit_zero_accuracy(self):
    """Test entry/exit events with accuracy zero."""
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_ZERO)

    # Enter uses the zone's gps co-ords
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_ZERO)

    # Exit doesn't use zero gps
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    # But does exit region correctly
    self.assertFalse(self.context.regions_entered[USER])
def test_event_gps_exit_outside_zone_sets_away(self):
    """Test the event for exit zone."""
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    self.assert_location_state('inner')

    # Exit message far away GPS location
    message = build_message(
        {'lon': 90.0,
         'lat': 90.0},
        REGION_GPS_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # Exit forces zone change to away
    self.assert_location_state(STATE_NOT_HOME)
def test_event_gps_entry_exit_right_order(self):
    """Test the event for ordering."""
    # Enter inner zone
    # Set location to the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    self.assert_location_state('inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner_2')

    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner')

    # Exit inner - should be in 'outer'
    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
    self.assert_location_accuracy(REGION_GPS_LEAVE_MESSAGE['acc'])
    self.assert_location_state('outer')
def test_event_gps_entry_exit_wrong_order(self):
    """Test the event for wrong order."""
    # Enter inner zone
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    self.assert_location_state('inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner_2')

    # Exit inner - should still be in 'inner_2'
    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    self.assert_location_state('inner_2')

    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
    self.assert_location_accuracy(REGION_GPS_LEAVE_MESSAGE['acc'])
    self.assert_location_state('outer')
def test_event_gps_entry_unknown_zone(self):
    """Test the event for unknown zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    # The message's GPS co-ords are used; they happen to lie
    # within the inner zone
    self.assert_location_latitude(REGION_GPS_ENTER_MESSAGE['lat'])
    self.assert_location_state('inner')
def test_event_gps_exit_unknown_zone(self):
    """Test the event for unknown zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
    self.assert_location_state('outer')
def test_event_entry_zone_loading_dash(self):
    """Test that a leading dash on the zone name is stripped."""
    # Make sure the leading - is ignored
    # Ownracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_GPS_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner')
# Region Beacon based event entry / exit testing
def test_event_region_entry_exit(self):
    """Test region-beacon enter then leave transitions."""
    # Seeing a beacon named "inner"
    self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # asssociated with the inner zone regardless of GPS.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)

    # Exit switches back to GPS but the beacon has no coords
    # so I am still located at the center of the inner region
    # until I receive a location update.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')

    # Left clean zone state
    self.assertFalse(self.context.regions_entered[USER])

    # Now sending a location update moves me again.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
    self.assert_location_latitude(LOCATION_MESSAGE['lat'])
    self.assert_location_accuracy(LOCATION_MESSAGE['acc'])
def test_event_region_with_spaces(self):
    """Test region-beacon transitions for a zone name with spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner 2')

    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # Left clean zone state
    self.assertFalse(self.context.regions_entered[USER])
def test_event_region_entry_exit_right_order(self):
    """Test the event for ordering."""
    # Enter inner zone
    # Set location to the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # See 'inner' region beacon
    self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    self.assert_location_state('inner')

    # See 'inner_2' region beacon
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner_2')

    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner')

    # Exit inner - should be in 'outer'
    self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)

    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner zone.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner')
def test_event_region_entry_exit_wrong_order(self):
    """Test the event for wrong order."""
    # Enter inner zone
    self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    self.assert_location_state('inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner_2')

    # Exit inner - should still be in 'inner_2'
    self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    self.assert_location_state('inner_2')

    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner_2 zone.
    self.assert_location_latitude(INNER_ZONE['latitude'])
    self.assert_location_accuracy(INNER_ZONE['radius'])
    self.assert_location_state('inner_2')
def test_event_beacon_unknown_zone_no_location(self):
    """Test the event for unknown zone."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. Except
    # in this case my Device hasn't had a location message
    # yet so it's in an odd state where it has state.state
    # None and no GPS coords so set the beacon to.
    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # My current state is None because I haven't seen a
    # location message or a GPS or Region # Beacon event
    # message. None is the state the test harness set for
    # the Device during test case setup.
    self.assert_location_state('None')

    # home is the state of a Device constructed through
    # the normal code path on it's first observation with
    # the conditions I pass along.
    self.assert_mobile_tracker_state('home', 'unknown')
def test_event_beacon_unknown_zone(self):
    """Test the event for unknown zone."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. First I
    # set my location so that my state is 'outer'
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
    self.assert_location_state('outer')

    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)

    # My state is still outer and now the unknown beacon
    # has joined me at outer.
    self.assert_location_state('outer')
    self.assert_mobile_tracker_state('outer', 'unknown')
def test_event_beacon_entry_zone_loading_dash(self):
    """Test that a leading dash on a beacon zone name is stripped."""
    # Make sure the leading - is ignored
    # Ownracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_BEACON_ENTER_MESSAGE)
    self.send_message(EVENT_TOPIC, message)
    self.assert_location_state('inner')
# ------------------------------------------------------------------------
# Mobile Beacon based event entry / exit testing
def test_mobile_enter_move_beacon(self):
    """Test the movement of a beacon."""
    # I am in the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see the 'keys' beacon. I set the location of the
    # beacon_keys tracker to my current device location.
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)

    self.assert_mobile_tracker_latitude(LOCATION_MESSAGE['lat'])
    self.assert_mobile_tracker_state('outer')

    # Location update to outside of defined zones.
    # I am now 'not home' and neither are my keys.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)

    self.assert_location_state(STATE_NOT_HOME)
    self.assert_mobile_tracker_state(STATE_NOT_HOME)

    not_home_lat = LOCATION_MESSAGE_NOT_HOME['lat']
    self.assert_location_latitude(not_home_lat)
    self.assert_mobile_tracker_latitude(not_home_lat)
def test_mobile_enter_exit_region_beacon(self):
    """Test the enter and the exit of a mobile beacon."""
    # I am in the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see a new mobile beacon
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    self.assert_mobile_tracker_latitude(OUTER_ZONE['latitude'])
    self.assert_mobile_tracker_state('outer')

    # GPS enter message should move beacon
    self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    self.assert_mobile_tracker_latitude(INNER_ZONE['latitude'])
    self.assert_mobile_tracker_state(REGION_GPS_ENTER_MESSAGE['desc'])

    # Exit inner zone to outer zone should move beacon to
    # center of outer zone
    self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)

    self.assert_mobile_tracker_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
    self.assert_mobile_tracker_state('outer')
def test_mobile_exit_move_beacon(self):
    """Test the exit move of a beacon."""
    # I am in the outer zone.
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see a new mobile beacon
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    self.assert_mobile_tracker_latitude(OUTER_ZONE['latitude'])
    self.assert_mobile_tracker_state('outer')

    # Exit mobile beacon, should set location
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)

    self.assert_mobile_tracker_latitude(OUTER_ZONE['latitude'])
    self.assert_mobile_tracker_state('outer')

    # Move after exit should do nothing: the beacon is no longer
    # travelling with the device
    self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    self.assert_mobile_tracker_latitude(OUTER_ZONE['latitude'])
    self.assert_mobile_tracker_state('outer')
def test_mobile_multiple_async_enter_exit(self):
    """Test the multiple entering."""
    # Test race condition: fire messages without waiting for each
    # one to be processed
    for _ in range(0, 20):
        fire_mqtt_message(
            self.hass, EVENT_TOPIC,
            json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE))
        fire_mqtt_message(
            self.hass, EVENT_TOPIC,
            json.dumps(MOBILE_BEACON_LEAVE_EVENT_MESSAGE))

    fire_mqtt_message(
        self.hass, EVENT_TOPIC,
        json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE))

    self.hass.block_till_done()
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    # All enters must be balanced by leaves
    self.assertEqual(len(self.context.mobile_beacons_active['greg_phone']),
                     0)
def test_mobile_multiple_enter_exit(self):
    """Test the multiple entering."""
    # A duplicate enter followed by a single leave must still
    # empty the active-beacon set
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)

    self.assertEqual(len(self.context.mobile_beacons_active['greg_phone']),
                     0)
    def test_complex_movement(self):
        """Test a complex sequence representative of real-world use."""
        # I am in the outer zone.
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
        self.assert_location_state('outer')
        # gps to inner location and event, as actually happens with OwnTracks
        location_message = build_message(
            {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
             'lon': REGION_GPS_ENTER_MESSAGE['lon']},
            LOCATION_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        # region beacon enter inner event and location as actually happens
        # with OwnTracks
        location_message = build_message(
            {'lat': location_message['lat'] + FIVE_M,
             'lon': location_message['lon'] + FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        # see keys mobile beacon and location message as actually happens
        location_message = build_message(
            {'lat': location_message['lat'] + FIVE_M,
             'lon': location_message['lon'] + FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_mobile_tracker_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # Slightly odd, I leave the location by gps before I lose
        # sight of the region beacon. This is also a little odd in
        # that my GPS coords are now in the 'outer' zone but I did not
        # "enter" that zone when I started up so my location is not
        # the center of OUTER_ZONE, but rather just my GPS location.
        # gps out of inner event and location
        location_message = build_message(
            {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
             'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
        self.assert_mobile_tracker_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
        self.assert_location_state('outer')
        self.assert_mobile_tracker_state('outer')
        # region beacon leave inner
        location_message = build_message(
            {'lat': location_message['lat'] - FIVE_M,
             'lon': location_message['lon'] - FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(location_message['lat'])
        self.assert_mobile_tracker_latitude(location_message['lat'])
        self.assert_location_state('outer')
        self.assert_mobile_tracker_state('outer')
        # lose keys mobile beacon
        lost_keys_location_message = build_message(
            {'lat': location_message['lat'] - FIVE_M,
             'lon': location_message['lon'] - FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(LOCATION_TOPIC, lost_keys_location_message)
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
        self.assert_location_latitude(lost_keys_location_message['lat'])
        self.assert_mobile_tracker_latitude(lost_keys_location_message['lat'])
        self.assert_location_state('outer')
        self.assert_mobile_tracker_state('outer')
        # gps leave outer
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
        self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
        self.assert_location_latitude(LOCATION_MESSAGE_NOT_HOME['lat'])
        # The keys tracker stays frozen at its last known fix.
        self.assert_mobile_tracker_latitude(lost_keys_location_message['lat'])
        self.assert_location_state('not_home')
        self.assert_mobile_tracker_state('outer')
        # location move not home
        location_message = build_message(
            {'lat': LOCATION_MESSAGE_NOT_HOME['lat'] - FIVE_M,
             'lon': LOCATION_MESSAGE_NOT_HOME['lon'] - FIVE_M},
            LOCATION_MESSAGE_NOT_HOME)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(location_message['lat'])
        self.assert_mobile_tracker_latitude(lost_keys_location_message['lat'])
        self.assert_location_state('not_home')
        self.assert_mobile_tracker_state('outer')
    def test_complex_movement_sticky_keys_beacon(self):
        """Test a complex sequence which was previously broken."""
        # I am not_home
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
        self.assert_location_state('outer')
        # gps to inner location and event, as actually happens with OwnTracks
        location_message = build_message(
            {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
             'lon': REGION_GPS_ENTER_MESSAGE['lon']},
            LOCATION_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.send_message(EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        # see keys mobile beacon and location message as actually happens
        location_message = build_message(
            {'lat': location_message['lat'] + FIVE_M,
             'lon': location_message['lon'] + FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_mobile_tracker_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # region beacon enter inner event and location as actually happens
        # with OwnTracks
        location_message = build_message(
            {'lat': location_message['lat'] + FIVE_M,
             'lon': location_message['lon'] + FIVE_M},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        # This sequence of moves would cause keys to follow
        # greg_phone around even after the OwnTracks sent
        # a mobile beacon 'leave' event for the keys.
        # leave keys
        self.send_message(LOCATION_TOPIC, location_message)
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # leave inner region beacon
        self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # enter inner region beacon
        self.send_message(EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_latitude(INNER_ZONE['latitude'])
        self.assert_location_state('inner')
        # enter keys
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # leave keys
        self.send_message(LOCATION_TOPIC, location_message)
        self.send_message(EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # leave inner region beacon
        self.send_message(EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
        self.send_message(LOCATION_TOPIC, location_message)
        self.assert_location_state('inner')
        self.assert_mobile_tracker_state('inner')
        # GPS leave inner region, I'm in the 'outer' region now
        # but on GPS coords
        leave_location_message = build_message(
            {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
             'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
            LOCATION_MESSAGE)
        self.send_message(EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
        self.send_message(LOCATION_TOPIC, leave_location_message)
        self.assert_location_state('outer')
        # The keys were left behind in the inner zone and must stay there.
        self.assert_mobile_tracker_state('inner')
        self.assert_location_latitude(REGION_GPS_LEAVE_MESSAGE['lat'])
        self.assert_mobile_tracker_latitude(INNER_ZONE['latitude'])
def test_waypoint_import_simple(self):
"""Test a simple import of list of waypoints."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[1])
self.assertTrue(wayp is not None)
def test_waypoint_import_blacklist(self):
"""Test import of list of waypoints for blacklisted user."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_no_whitelist(self):
"""Test import of list of waypoints with no whitelist set."""
@asyncio.coroutine
def mock_see(**kwargs):
"""Fake see method for owntracks."""
return
test_config = {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True
}
run_coroutine_threadsafe(owntracks.async_setup_scanner(
self.hass, test_config, mock_see), self.hass.loop).result()
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is not None)
def test_waypoint_import_bad_json(self):
"""Test importing a bad JSON payload."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message, True)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_existing(self):
"""Test importing a zone that exists."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Get the first waypoint exported
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
# Send an update
waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
new_wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp == new_wayp)
def generate_ciphers(secret):
    """Generate test ciphers for the DEFAULT_LOCATION_MESSAGE.

    Returns a ``(ctxt, mctxt)`` pair: a real libsodium ciphertext (or ''
    when libnacl/libsodium is unavailable) and a pickle-based mock
    ciphertext, both base64-encoded.
    """
    import base64
    import json
    import pickle

    plaintext = json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8")
    secret_bytes = secret.encode("utf-8")
    # libnacl ciphertext generation will fail if the module cannot be
    # imported. However, the test for decryption also relies on this
    # library and won't be run without it.
    try:
        from libnacl import crypto_secretbox_KEYBYTES as KEYLEN
        from libnacl.secret import SecretBox
        key = secret_bytes[:KEYLEN].ljust(KEYLEN, b'\0')
        ctxt = base64.b64encode(
            SecretBox(key).encrypt(plaintext)).decode("utf-8")
    except (ImportError, OSError):
        ctxt = ''
    mctxt = base64.b64encode(
        pickle.dumps((secret_bytes, plaintext))).decode("utf-8")
    return (ctxt, mctxt)
TEST_SECRET_KEY = 's3cretkey'
# (real ciphertext, pickle-based mock ciphertext) derived from the key;
# CIPHERTEXT is '' when libnacl/libsodium is unavailable.
CIPHERTEXT, MOCK_CIPHERTEXT = generate_ciphers(TEST_SECRET_KEY)
ENCRYPTED_LOCATION_MESSAGE = {
    # Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
    '_type': 'encrypted',
    'data': CIPHERTEXT
}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
    # Mock-encrypted version of LOCATION_MESSAGE using pickle
    '_type': 'encrypted',
    'data': MOCK_CIPHERTEXT
}
def mock_cipher():
    """Return a dummy pickle-based cipher.

    Mimics the ``(key_length, decrypt_callable)`` interface expected by
    the owntracks platform.
    """
    def decrypt(ciphertext, key):
        """Unpickle the payload and verify the embedded key."""
        import pickle
        expected_key, plaintext = pickle.loads(ciphertext)
        if key != expected_key:
            raise ValueError()
        return plaintext

    return (len(TEST_SECRET_KEY), decrypt)
class TestDeviceTrackerOwnTrackConfigs(BaseMQTT):
    """Test the OwnTrack sensor."""

    # pylint: disable=invalid-name

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        mock_mqtt_component(self.hass)
        mock_component(self.hass, 'group')
        mock_component(self.hass, 'zone')
        # Device-tracker config persistence is patched out so the tests
        # never read or write the real known-devices configuration.
        patch_load = patch(
            'homeassistant.components.device_tracker.async_load_config',
            return_value=mock_coro([]))
        patch_load.start()
        self.addCleanup(patch_load.stop)
        patch_save = patch('homeassistant.components.device_tracker.'
                           'DeviceTracker.async_update_config')
        patch_save.start()
        self.addCleanup(patch_save.stop)

    def teardown_method(self, method):
        """Tear down resources."""
        self.hass.stop()

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload(self):
        """Test encrypted payload."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: TEST_SECRET_KEY,
                }})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        self.assert_location_latitude(LOCATION_MESSAGE['lat'])

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload_topic_key(self):
        """Test encrypted payload with a topic key."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: {
                        LOCATION_TOPIC: TEST_SECRET_KEY,
                    }}})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        self.assert_location_latitude(LOCATION_MESSAGE['lat'])

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload_no_key(self):
        """Test encrypted payload with no key."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    # key missing
                }})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        # Payload cannot be decrypted, so no tracker state is created.
        assert self.hass.states.get(DEVICE_TRACKER_STATE) is None

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload_wrong_key(self):
        """Test encrypted payload with wrong key."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: 'wrong key',
                }})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        assert self.hass.states.get(DEVICE_TRACKER_STATE) is None

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload_wrong_topic_key(self):
        """Test encrypted payload with wrong topic key."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: {
                        LOCATION_TOPIC: 'wrong key'
                    }}})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        assert self.hass.states.get(DEVICE_TRACKER_STATE) is None

    @patch('homeassistant.components.device_tracker.owntracks.get_cipher',
           mock_cipher)
    def test_encrypted_payload_no_topic_key(self):
        """Test encrypted payload with no topic key."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: {
                        # Key is configured for a different device's topic.
                        'owntracks/{}/{}'.format(USER, 'otherdevice'): 'foobar'
                    }}})
        self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
        assert self.hass.states.get(DEVICE_TRACKER_STATE) is None

    # Probe for libnacl at class-definition time so the real-crypto test
    # below can be skipped when libsodium is missing.
    try:
        import libnacl
    except (ImportError, OSError):
        libnacl = None

    @unittest.skipUnless(libnacl, "libnacl/libsodium is not installed")
    def test_encrypted_payload_libsodium(self):
        """Test sending encrypted message payload."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'owntracks',
                    CONF_SECRET: TEST_SECRET_KEY,
                }})
        self.send_message(LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
        self.assert_location_latitude(LOCATION_MESSAGE['lat'])
| |
# -*- coding: utf-8 -*-
"""
"""
import unittest
from datetime import datetime
from decimal import Decimal
from unittest.mock import patch
from vulyk.app import TASKS_TYPES
from vulyk.blueprints.gamification import listeners
from vulyk.blueprints.gamification.core.rules import Rule
from vulyk.blueprints.gamification.core.state import (
UserState, InvalidUserStateException)
from vulyk.blueprints.gamification.models.events import EventModel
from vulyk.blueprints.gamification.models.rules import (
RuleModel, ProjectAndFreeRules)
from vulyk.blueprints.gamification.models.state import UserStateModel
from vulyk.blueprints.gamification.models.task_types import \
POINTS_PER_TASK_KEY, COINS_PER_TASK_KEY
from vulyk.models.stats import WorkSession
from vulyk.models.tasks import AbstractTask, AbstractAnswer, Batch
from vulyk.models.user import User, Group
from .fixtures import FakeType
from ..base import BaseTest
from ..fixtures import FakeType as BaseFakeType
class TestAllocationOfMoneyAndPoints(BaseTest):
    """Verify points/coins allocation and event creation on task completion."""

    TIMESTAMP = datetime.now()
    USER = None  # type: User
    BATCH = None  # re-created for every test in setUp()
    TASKS_TYPES.clear()  # ensure no task types leak in from other modules

    @classmethod
    def setUpClass(cls):
        """Create the default group and the test user once per class."""
        super().setUpClass()
        Group.objects.create(
            description='test', id='default', allowed_types=[
                FakeType.type_name, BaseFakeType.type_name
            ])
        cls.USER = User(username='user0', email='user0@email.com').save()

    @classmethod
    def tearDownClass(cls):
        """Drop users and groups created in setUpClass."""
        User.objects.delete()
        Group.objects.delete()
        super().tearDownClass()

    def setUp(self):
        """Create a fresh default batch paying 1 point / 1 coin per task."""
        super().setUp()
        self.BATCH = Batch(
            id='default',
            task_type=FakeType.type_name,
            tasks_count=1,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: 1.0,
                COINS_PER_TASK_KEY: 1.0
            }
        ).save()

    def tearDown(self):
        """Wipe every task/gamification collection a test may have touched."""
        TASKS_TYPES.clear()
        AbstractTask.objects.delete()
        AbstractAnswer.objects.delete()
        WorkSession.objects.delete()
        Batch.objects.delete()
        UserStateModel.objects.delete()
        EventModel.objects.delete()
        RuleModel.objects.delete()
        super().tearDown()

    def test_single_allocation(self):
        """A single finished task yields one event and updates the state."""
        task_type = FakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        task = task_type.task_model(
            id='task0',
            task_type=task_type.type_name,
            batch=self.BATCH,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type.work_session_manager.start_work_session(task, self.USER.id)
        task_type.on_task_done(self.USER, task.id, {'result': 'result'})
        events = EventModel.objects.filter(user=self.USER)
        state = UserStateModel.get_or_create_by_user(user=self.USER)
        self.assertEqual(len(events), 1)
        ev = events[0].to_event()
        self.assertEqual(ev.points_given, Decimal('1.0'))
        self.assertEqual(ev.coins, Decimal('1.0'))
        self.assertEqual(ev.achievements, [])
        self.assertEqual(ev.level_given, 1)
        self.assertEqual(ev.viewed, False)
        self.assertEqual(state.points, ev.points_given)
        # Coins stay "potential" until they are actually paid out.
        self.assertEqual(state.actual_coins, Decimal())
        self.assertEqual(state.potential_coins, ev.coins)
        self.assertEqual(state.level, 1)
        self.assertEqual(state.achievements, {})
        self.assertEqual(ev.timestamp, state.last_changed)

    def test_single_allocation_badge_given(self):
        """A matching rule awards its badge together with points/coins."""
        task_type = FakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        task = task_type.task_model(
            id='task0',
            task_type=task_type.type_name,
            batch=self.BATCH,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        # Rule fires after a single completed task.
        rule = Rule(
            badge='Faithful Fury',
            name='rule_1',
            description='Kill one fly',
            bonus=0,
            tasks_number=1,
            days_number=0,
            is_weekend=False,
            is_adjacent=False,
            rule_id='100')
        RuleModel.from_rule(rule).save()
        task_type.work_session_manager.start_work_session(task, self.USER.id)
        task_type.on_task_done(self.USER, task.id, {'result': 'result'})
        events = EventModel.objects.filter(user=self.USER)
        state = UserStateModel.get_or_create_by_user(user=self.USER)
        ev = events[0].to_event()
        self.assertEqual(ev.points_given, Decimal('1.0'))
        self.assertEqual(ev.coins, Decimal('1.0'))
        self.assertEqual(ev.level_given, 1)
        self.assertEqual(ev.viewed, False)
        self.assertEqual(ev.achievements, [rule])
        self.assertEqual(state.points, ev.points_given)
        self.assertEqual(state.actual_coins, Decimal())
        self.assertEqual(state.potential_coins, ev.coins)
        self.assertEqual(state.level, 1)
        self.assertEqual(ev.timestamp, state.last_changed)
        self.assertEqual(state.achievements, {rule.id: rule})

    def test_double_allocation(self):
        """Two tasks from one batch accumulate points and coins."""
        task_type = FakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        batch = Batch(
            id='default',
            task_type=task_type.type_name,
            tasks_count=2,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: 2.5,
                COINS_PER_TASK_KEY: 1.5
            }
        ).save()
        task1 = task_type.task_model(
            id='task1',
            task_type=task_type.type_name,
            batch=batch,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task2 = task_type.task_model(
            id='task2',
            task_type=task_type.type_name,
            batch=batch,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type.work_session_manager.start_work_session(task1, self.USER.id)
        task_type.on_task_done(self.USER, task1.id, {'result': 'result'})
        task_type.work_session_manager.start_work_session(task2, self.USER.id)
        task_type.on_task_done(self.USER, task2.id, {'result': 'result'})
        state = UserStateModel.get_or_create_by_user(user=self.USER)
        self.assertEqual(state.points, Decimal('5.0'))
        self.assertEqual(state.level, 1)
        self.assertEqual(state.actual_coins, Decimal())
        self.assertEqual(state.potential_coins, Decimal('3.0'))
        events = EventModel.objects \
            .filter(user=self.USER) \
            .order_by('-timestamp')
        self.assertEqual(len(events), 2)
        self.assertEqual(events[0].timestamp, state.last_changed)
        # Only the first event carries a level grant; no level change later.
        self.assertEqual(events[0].level_given, None)
        self.assertEqual(events[1].level_given, 1)
        for ev in events:
            self.assertEqual(ev.points_given, Decimal('2.5'))
            self.assertEqual(ev.coins, Decimal('1.5'))
            self.assertEqual(ev.achievements, [])
            self.assertEqual(ev.viewed, False)

    def test_wrong_allocation(self):
        """Negative per-task rewards must raise InvalidUserStateException."""
        task_type = FakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        batch = Batch(
            id='default',
            task_type=task_type.type_name,
            tasks_count=1,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: -1.0,
                COINS_PER_TASK_KEY: -1.0
            }
        ).save()
        task = task_type.task_model(
            id='task0',
            task_type=task_type.type_name,
            batch=batch,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type.work_session_manager.start_work_session(task, self.USER.id)
        self.assertRaises(
            InvalidUserStateException,
            lambda: task_type.on_task_done(self.USER,
                                           task.id,
                                           {'result': 'result'})
        )

    def test_double_allocation_different_batches(self):
        """Rewards from different batches are summed; level rises to 2."""
        task_type = FakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        batch1 = Batch(
            id='default',
            task_type=task_type.type_name,
            tasks_count=1,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: 2.5,
                COINS_PER_TASK_KEY: 1.5
            }
        ).save()
        batch2 = Batch(
            id='yummy',
            task_type=task_type.type_name,
            tasks_count=1,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: 25,
                COINS_PER_TASK_KEY: 15
            }
        ).save()
        task1 = task_type.task_model(
            id='task1',
            task_type=task_type.type_name,
            batch=batch1,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task2 = task_type.task_model(
            id='task2',
            task_type=task_type.type_name,
            batch=batch2,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type.work_session_manager.start_work_session(task1, self.USER.id)
        task_type.on_task_done(self.USER, task1.id, {'result': 'result'})
        task_type.work_session_manager.start_work_session(task2, self.USER.id)
        task_type.on_task_done(self.USER, task2.id, {'result': 'result'})
        state = UserStateModel.get_or_create_by_user(user=self.USER)
        self.assertEqual(state.points, Decimal('27.5'))
        self.assertEqual(state.level, 2)
        self.assertEqual(state.actual_coins, Decimal())
        self.assertEqual(state.potential_coins, Decimal('16.5'))
        events = EventModel.objects \
            .filter(user=self.USER) \
            .order_by('-timestamp')
        self.assertEqual(len(events), 2)
        # Order of events is reversed
        self.assertEqual(events[0].timestamp, state.last_changed)
        self.assertEqual(events[0].level_given, 2)
        self.assertEqual(events[1].level_given, 1)
        for ev in events:
            self.assertIn(ev.points_given, [Decimal('2.5'), Decimal('25')])
            self.assertIn(ev.coins, [Decimal('1.5'), Decimal('15')])
            self.assertEqual(ev.achievements, [])
            self.assertEqual(ev.viewed, False)

    def test_single_allocation_no_events(self):
        """A task type without gamification produces no state and no events."""
        task_type = BaseFakeType({})
        TASKS_TYPES[task_type.type_name] = task_type
        # No batch_meta: the batch carries no reward configuration.
        batch = Batch(
            id='default',
            task_type=task_type.type_name,
            tasks_count=1,
            tasks_processed=0
        ).save()
        task = task_type.task_model(
            id='task0',
            task_type=task_type.type_name,
            batch=batch,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type.work_session_manager.start_work_session(task, self.USER.id)
        task_type.on_task_done(self.USER, task.id, {'result': 'result'})
        self.assertEqual(UserStateModel.objects(user=self.USER).count(), 0)
        self.assertEqual(EventModel.objects(user=self.USER).count(), 0)

    def test_double_allocation_mixed_project(self):
        """Only the gamified task type contributes points, coins and events."""
        task_type_base = BaseFakeType({})
        task_type_new = FakeType({})
        TASKS_TYPES[task_type_base.type_name] = task_type_base
        TASKS_TYPES[task_type_new.type_name] = task_type_new
        batch1 = Batch(
            id='default',
            task_type=task_type_base.type_name,
            tasks_count=1,
            tasks_processed=0
        ).save()
        batch2 = Batch(
            id='yummy',
            task_type=task_type_new.type_name,
            tasks_count=1,
            tasks_processed=0,
            batch_meta={
                POINTS_PER_TASK_KEY: 25,
                COINS_PER_TASK_KEY: 15
            }
        ).save()
        task1 = task_type_base.task_model(
            id='task1',
            task_type=task_type_base.type_name,
            batch=batch1,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task2 = task_type_new.task_model(
            id='task2',
            task_type=task_type_new.type_name,
            batch=batch2,
            closed=False,
            users_count=0,
            users_processed=[],
            users_skipped=[],
            task_data={'data': 'data'}).save()
        task_type_base.work_session_manager.start_work_session(task1,
                                                               self.USER.id)
        task_type_base.on_task_done(self.USER, task1.id, {'result': 'result'})
        task_type_new.work_session_manager.start_work_session(task2,
                                                              self.USER.id)
        task_type_new.on_task_done(self.USER, task2.id, {'result': 'result'})
        state = UserStateModel.get_or_create_by_user(user=self.USER)
        self.assertEqual(state.points, Decimal('25'))
        self.assertEqual(state.level, 2)
        self.assertEqual(state.actual_coins, Decimal())
        self.assertEqual(state.potential_coins, Decimal('15'))
        events = EventModel.objects.filter(user=self.USER)
        self.assertEqual(len(events), 1)
        ev = events[0]
        self.assertEqual(ev.points_given, Decimal('25'))
        self.assertEqual(ev.coins, Decimal('15'))
        self.assertEqual(ev.achievements, [])
        self.assertEqual(ev.level_given, 2)
        self.assertEqual(ev.viewed, False)
        self.assertEqual(ev.timestamp, state.last_changed)
class TestAllocationBadges(BaseTest):
    """Verify how a user's current state is turned into rule-query filters."""

    SATURDAY = datetime(2017, 7, 8)
    SUNDAY = datetime(2017, 7, 9)
    MONDAY = datetime(2017, 7, 10)
    USER = None  # type: User
    # Dotted path patched in tests to intercept the rules query.
    GET_ACTUAL_RULES = 'vulyk.blueprints.gamification.models.rules.' \
                       'RuleModel.get_actual_rules'

    @classmethod
    def setUpClass(cls):
        """Create the default group and the test user once per class."""
        super().setUpClass()
        Group.objects.create(
            description='test', id='default', allowed_types=[
                FakeType.type_name, BaseFakeType.type_name
            ])
        cls.USER = User(username='user0', email='user0@email.com').save()

    @classmethod
    def tearDownClass(cls):
        """Drop users and groups created in setUpClass."""
        User.objects.delete()
        Group.objects.delete()
        super().tearDownClass()

    def test_get_rules_no_earned_no_batch_no_weekend(self):
        """A rookie on a weekday: empty skip_ids, free-rules filter only."""
        rookie_state = UserState(
            user=self.USER,
            level=0,
            points=Decimal(),
            actual_coins=Decimal(),
            potential_coins=Decimal(),
            achievements=[],
            last_changed=self.MONDAY
        )

        def patched_rules(**kwargs):
            assert kwargs['skip_ids'] == []
            assert kwargs['rule_filter'] == ProjectAndFreeRules('')
            assert not kwargs['is_weekend']

        with patch(self.GET_ACTUAL_RULES, patched_rules):
            listeners.get_actual_rules(rookie_state, '', self.MONDAY)
            # Passing a batch changes the filter, so the asserts in
            # patched_rules must fail.
            self.assertRaises(
                AssertionError,
                lambda: listeners.get_actual_rules(
                    rookie_state,
                    'batch_should_not_be_here',
                    self.MONDAY))

    def test_get_rules_badges_no_batch_no_weekend(self):
        """Earned badge ids land in skip_ids so they are not granted twice."""
        rule = Rule(
            badge='',
            name='rule_1',
            description='',
            bonus=0,
            tasks_number=0,
            days_number=5,
            is_weekend=False,
            is_adjacent=True,
            rule_id='100')
        state = UserState(
            user=self.USER,
            level=20,
            points=Decimal(5000),
            actual_coins=Decimal(3240),
            potential_coins=Decimal(4000),
            achievements=[rule],
            last_changed=self.MONDAY
        )

        def patched_rules(**kwargs):
            assert kwargs['skip_ids'] == ['100']
            assert kwargs['rule_filter'] == ProjectAndFreeRules('')
            assert not kwargs['is_weekend']

        with patch(self.GET_ACTUAL_RULES, patched_rules):
            listeners.get_actual_rules(state, '', self.MONDAY)

    def test_get_rules_no_badges_no_batch_weekend(self):
        """Saturday and Sunday must set the is_weekend flag."""
        state = UserState(
            user=self.USER,
            level=20,
            points=Decimal(5000),
            actual_coins=Decimal(3240),
            potential_coins=Decimal(4000),
            achievements=[],
            last_changed=self.MONDAY
        )

        def patched_rules(**kwargs):
            assert kwargs['skip_ids'] == []
            assert kwargs['rule_filter'] == ProjectAndFreeRules('')
            assert kwargs['is_weekend']

        with patch(self.GET_ACTUAL_RULES, patched_rules):
            listeners.get_actual_rules(state, '', self.SATURDAY)
            listeners.get_actual_rules(state, '', self.SUNDAY)

    def test_get_rules_badges_batch_weekend(self):
        """Badges, a batch and a weekend all combine into one filter set."""
        rule = Rule(
            badge='',
            name='rule_1',
            description='',
            bonus=0,
            tasks_number=0,
            days_number=5,
            is_weekend=False,
            is_adjacent=True,
            rule_id='100')
        state = UserState(
            user=self.USER,
            level=20,
            points=Decimal(5000),
            actual_coins=Decimal(3240),
            potential_coins=Decimal(4000),
            achievements=[rule],
            last_changed=self.MONDAY
        )

        def patched_rules(**kwargs):
            assert kwargs['skip_ids'] == ['100']
            assert kwargs['rule_filter'] == ProjectAndFreeRules('batch_1')
            assert kwargs['is_weekend']

        with patch(self.GET_ACTUAL_RULES, patched_rules):
            listeners.get_actual_rules(state, 'batch_1', self.SUNDAY)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import sqlalchemy as sa
from sqlalchemy.ext import compiler as sa_compiler
from sqlalchemy.schema import DDLElement
from .compat import string_types
def _check_if_key_exists(key):
    """Return a truthy value when a dist/sort key argument was supplied.

    ``sa.Column`` instances are matched with ``isinstance`` first so that
    a Column is never evaluated in boolean context (SQLAlchemy clause
    elements raise ``TypeError`` for ``bool()``); any other value is
    judged by its own truthiness.
    """
    if isinstance(key, sa.Column):
        return True
    return key
def get_table_attributes(preparer,
                         diststyle=None,
                         distkey=None,
                         sortkey=None,
                         interleaved_sortkey=None):
    """
    Parse the table attributes into an acceptable string for Redshift,
    checking for valid combinations of distribution options.

    Parameters
    ----------
    preparer: RedshiftIdentifierPreparer, required
        The preparer associated with the compiler, usually accessed through
        compiler.preparer. You can use a RedshiftDDLCompiler instance to
        access it.
    diststyle: str, optional
        The diststyle to use for the table attributes. This must be one of:
        ("ALL", "EVEN", "KEY"). If unset, Redshift passes AUTO. If KEY is used
        a distkey argument must be provided. Inversely, if a diststyle other
        than KEY is provided, a distkey argument cannot be provided.
    distkey: str or sqlalchemy.Column, optional
        The distribution key to use for the table attributes. This can be
        provided without any diststyle specified or with KEY diststyle
        specified.
    sortkey: str or sqlalchemy.Column (or iterable thereof), optional
        The (compound) sort key(s) to use for the table attributes. Mutually
        exclusive option from interleaved_sortkey.
    interleaved_sortkey: str or sqlalchemy.Column (or iterable), optional
        The (interleaved) sort key(s) to use for the table attributes. Mutually
        exclusive option from sortkey.

    Returns
    -------
    string
        the table_attributes to append to a DDLElement, normally when creating
        a table or materialized view.

    Raises
    ------
    sqlalchemy.exc.ArgumentError
        when an invalid diststyle is set,
        when incompatible (diststyle, distkey) pairs are used,
        when both sortkey and interleaved_sortkey is specified.
    """
    text = ""
    has_distkey = _check_if_key_exists(distkey)
    if diststyle:
        # Normalize for validation; Redshift keywords are case-insensitive.
        diststyle = diststyle.upper()
        if diststyle not in ('EVEN', 'KEY', 'ALL'):
            raise sa.exc.ArgumentError(
                u"diststyle {0} is invalid".format(diststyle)
            )
        if diststyle != 'KEY' and has_distkey:
            raise sa.exc.ArgumentError(
                u"DISTSTYLE EVEN/ALL is not compatible with a DISTKEY."
            )
        if diststyle == 'KEY' and not has_distkey:
            raise sa.exc.ArgumentError(
                u"DISTKEY specification is required for DISTSTYLE KEY"
            )
        text += " DISTSTYLE " + diststyle
    if has_distkey:
        # A Column is reduced to its name; plain strings pass through.
        if isinstance(distkey, sa.Column):
            distkey = distkey.name
        text += " DISTKEY ({0})".format(preparer.quote(distkey))
    has_sortkey = _check_if_key_exists(sortkey)
    has_interleaved = _check_if_key_exists(interleaved_sortkey)
    if has_sortkey and has_interleaved:
        raise sa.exc.ArgumentError(
            "Parameters sortkey and interleaved_sortkey are "
            "mutually exclusive; you may not specify both."
        )
    if has_sortkey or has_interleaved:
        keys = sortkey if has_sortkey else interleaved_sortkey
        # Accept a single key or any iterable of keys.
        if isinstance(keys, (string_types, sa.Column)):
            keys = [keys]
        keys = [key.name if isinstance(key, sa.Column) else key
                for key in keys]
        if has_interleaved:
            text += " INTERLEAVED"
        sortkey_string = ", ".join(preparer.quote(key)
                                   for key in keys)
        text += " SORTKEY ({0})".format(sortkey_string)
    return text
class CreateMaterializedView(DDLElement):
    """
    DDLElement to create a materialized view in Redshift where the distribution
    options can be set.
    SEE:
    docs.aws.amazon.com/redshift/latest/dg/materialized-view-create-sql-command
    This works for any selectable. Consider the trivial example of this table:
    >>> import sqlalchemy as sa
    >>> from sqlalchemy_redshift.dialect import CreateMaterializedView
    >>> engine = sa.create_engine('redshift+psycopg2://example')
    >>> metadata = sa.MetaData()
    >>> user = sa.Table(
    ...     'user',
    ...     metadata,
    ...     sa.Column('id', sa.Integer, primary_key=True),
    ...     sa.Column('name', sa.String)
    ... )
    >>> selectable = sa.select([user.c.id, user.c.name], from_obj=user)
    >>> view = CreateMaterializedView(
    ...     'materialized_view_of_users',
    ...     selectable,
    ...     distkey='id',
    ...     sortkey='name'
    ... )
    >>> print(view.compile(engine))
    <BLANKLINE>
    CREATE MATERIALIZED VIEW materialized_view_of_users
    DISTKEY (id) SORTKEY (name)
    AS SELECT "user".id, "user".name
    FROM "user"
    <BLANKLINE>
    <BLANKLINE>
    The materialized view can take full advantage of Redshift's distributed
    architecture via distribution styles and sort keys.
    The CreateMaterializedView is a DDLElement, so it can be executed via any
    execute() command, be it from an Engine, Session, or Connection.
    """
    def __init__(self, name, selectable, backup=True, diststyle=None,
                 distkey=None, sortkey=None, interleaved_sortkey=None):
        """
        Parameters
        ----------
        name: str, required
            the name of the materialized view to be created.
        selectable: sqlalchemy selectable, required
            the sqlalchemy selectable to be the base query for the view.
        backup: bool, optional
            whether the view is included in cluster snapshots. Defaults to
            True (Redshift's default); when False, BACKUP NO is emitted.
        diststyle: str, optional
            The diststyle to use for the table attributes. This must be one of:
            ("ALL", "EVEN", "KEY"). If unset, Redshift passes AUTO. If KEY is
            used, a distkey argument must be provided. Inversely, if
            a diststyle other than KEY is provided, a distkey argument cannot
            be provided.
        distkey: str or sqlalchemy.Column, optional
            The distribution key to use the for the table attributes. This can
            be provided without any diststyle specified or with KEY diststyle
            specified.
        sortkey: str or sqlalchemy.Column (or iterable thereof), optional
            The (compound) sort key(s) to use for the table attributes.
            Mutually exclusive option from interleaved_sortkey.
        interleaved_sortkey: str or sqlalchemy.Column (or iterable), optional
            The (interleaved) sort key(s) to use for the table attributes.
            Mutually exclusive option from sortkey.
        """
        # Validation of the diststyle/distkey/sortkey combinations happens at
        # compile time (see get_table_attributes), not here.
        self.name = name
        self.selectable = selectable
        self.backup = backup
        self.diststyle = diststyle
        self.distkey = distkey
        self.sortkey = sortkey
        self.interleaved_sortkey = interleaved_sortkey
@sa_compiler.compiles(CreateMaterializedView)
def compile_create_materialized_view(element, compiler, **kw):
    """
    Returns the SQL query that creates the materialized view.

    Combines the view name, optional BACKUP NO clause, table attributes
    (diststyle/distkey/sortkey) and the compiled SELECT into one statement.
    """
    text = """\
        CREATE MATERIALIZED VIEW {name}
        {backup}
        {table_attributes}
        AS {selectable}\
    """
    table_attributes = get_table_attributes(
        compiler.preparer,
        diststyle=element.diststyle,
        distkey=element.distkey,
        sortkey=element.sortkey,
        interleaved_sortkey=element.interleaved_sortkey
    )
    # Defaults to yes, so omit default case
    backup = "" if element.backup else "BACKUP NO "
    # literal_binds inlines bound parameters; DDL cannot carry bind params.
    selectable = compiler.sql_compiler.process(element.selectable,
                                               literal_binds=True)
    text = text.format(
        name=element.name,
        backup=backup,
        table_attributes=table_attributes,
        selectable=selectable
    )
    # Clean it up to have no leading spaces
    text = "\n".join([line.strip() for line in text.split("\n")
                      if line.strip()])
    return text
class DropMaterializedView(DDLElement):
    """
    Drop the materialized view from the database.
    SEE:
    docs.aws.amazon.com/redshift/latest/dg/materialized-view-drop-sql-command
    This undoes the create command, as expected:
    >>> import sqlalchemy as sa
    >>> from sqlalchemy_redshift.dialect import DropMaterializedView
    >>> engine = sa.create_engine('redshift+psycopg2://example')
    >>> drop = DropMaterializedView(
    ...     'materialized_view_of_users',
    ...     if_exists=True
    ... )
    >>> print(drop.compile(engine))
    <BLANKLINE>
    DROP MATERIALIZED VIEW IF EXISTS materialized_view_of_users
    <BLANKLINE>
    <BLANKLINE>
    This can be included in any execute() statement.
    """
    def __init__(self, name, if_exists=False, cascade=False):
        """
        Build the DropMaterializedView DDLElement.
        Parameters
        ----------
        name: str
            name of the materialized view to drop
        if_exists: bool, optional
            if True, the IF EXISTS clause is added. This will make the query
            successful even if the view does not exist, i.e. it lets you drop
            a non-existent view. Defaults to False.
        cascade: bool, optional
            if True, the CASCADE clause is added. This will drop all
            views/objects in the DB that depend on this materialized view.
            Defaults to False.
        """
        self.name = name
        self.if_exists = if_exists
        self.cascade = cascade
@sa_compiler.compiles(DropMaterializedView)
def compile_drop_materialized_view(element, compiler, **kw):
    """
    Formats and returns the drop statement for materialized views.
    """
    clauses = {
        'name': element.name,
        'if_exists': "IF EXISTS " if element.if_exists else "",
        'cascade': " CASCADE" if element.cascade else "",
    }
    return "DROP MATERIALIZED VIEW {if_exists}{name}{cascade}".format(**clauses)
| |
# Open mode AP tests
# Copyright (c) 2014, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import struct
import subprocess
import time
import os
import hostapd
import hwsim_utils
from tshark import run_tshark
from utils import alloc_fail
from wpasupplicant import WpaSupplicant
def test_ap_open(dev, apdev):
    """AP with open mode (no security) configuration"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    # bg_scan_period=0 disables background scans so they cannot interfere
    # with the connectivity checks below.
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bg_scan_period="0")
    ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
    if ev is None:
        raise Exception("No connection event received from hostapd")
    # Verify data path works in both directions.
    hwsim_utils.test_connectivity(dev[0], hapd)
    dev[0].request("DISCONNECT")
    ev = hapd.wait_event([ "AP-STA-DISCONNECTED" ], timeout=5)
    if ev is None:
        raise Exception("No disconnection event received from hostapd")
def test_ap_open_packet_loss(dev, apdev):
    """AP with open mode configuration and large packet loss"""
    # Make hostapd randomly ignore half of the management frames to force
    # the stations through their retry paths.
    params = { "ssid": "open",
               "ignore_probe_probability": "0.5",
               "ignore_auth_probability": "0.5",
               "ignore_assoc_probability": "0.5",
               "ignore_reassoc_probability": "0.5" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Start all three connections first, then wait, so retries run in parallel.
    for i in range(0, 3):
        dev[i].connect("open", key_mgmt="NONE", scan_freq="2412",
                       wait_connect=False)
    for i in range(0, 3):
        dev[i].wait_connected(timeout=20)
def test_ap_open_unknown_action(dev, apdev):
    """AP with open mode configuration and unknown Action frame"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    bssid = apdev[0]['bssid']
    # Send an Action frame with an unknown category/payload (765432 hex).
    cmd = "MGMT_TX {} {} freq=2412 action=765432".format(bssid, bssid)
    if "FAIL" in dev[0].request(cmd):
        raise Exception("Could not send test Action frame")
    ev = dev[0].wait_event(["MGMT-TX-STATUS"], timeout=10)
    if ev is None:
        raise Exception("Timeout on MGMT-TX-STATUS")
    # The AP should still ack the frame at the MAC layer even though the
    # Action category is unknown to it.
    if "result=SUCCESS" not in ev:
        raise Exception("AP did not ack Action frame")
def test_ap_open_reconnect_on_inactivity_disconnect(dev, apdev):
    """Reconnect to open mode AP after inactivity related disconnection"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    # reason=4 is "Disassociated due to inactivity".
    hapd.request("DEAUTHENTICATE " + dev[0].p2p_interface_addr() + " reason=4")
    dev[0].wait_disconnected(timeout=5)
    # wpa_supplicant should reconnect quickly on its own after this reason.
    dev[0].wait_connected(timeout=2, error="Timeout on reconnection")
def test_ap_open_assoc_timeout(dev, apdev):
    """AP timing out association"""
    # NOTE: the previous version assigned an unused local (ssid = "test");
    # the AP and the connection both use "open", so that variable is removed.
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].scan(freq="2412")
    # Take over management frame processing so the AP's responses can be
    # crafted manually below.
    hapd.set("ext_mgmt_frame_handling", "1")
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   wait_connect=False)
    # Wait for the station's Authentication frame (subtype 11).
    for i in range(0, 10):
        req = hapd.mgmt_rx()
        if req is None:
            raise Exception("MGMT RX wait timed out")
        if req['subtype'] == 11:
            break
        req = None
    if not req:
        raise Exception("Authentication frame not received")
    # Reply with a successful Authentication response (auth alg 0, seq 2,
    # status 0) but then ignore the Association Requests to force retries.
    resp = {}
    resp['fc'] = req['fc']
    resp['da'] = req['sa']
    resp['sa'] = req['da']
    resp['bssid'] = req['bssid']
    resp['payload'] = struct.pack('<HHH', 0, 2, 0)
    hapd.mgmt_tx(resp)
    assoc = 0
    for i in range(0, 10):
        req = hapd.mgmt_rx()
        if req is None:
            raise Exception("MGMT RX wait timed out")
        if req['subtype'] == 0:
            assoc += 1
            if assoc == 3:
                break
    if assoc != 3:
        raise Exception("Association Request frames not received: assoc=%d" % assoc)
    # Hand frame processing back to hostapd and let association complete.
    hapd.set("ext_mgmt_frame_handling", "0")
    dev[0].wait_connected(timeout=15)
def test_ap_open_id_str(dev, apdev):
    """AP with open mode and id_str"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412", id_str="foo",
                   wait_connect=False)
    # The id_str value must be echoed both in the connect event and in STATUS.
    ev = dev[0].wait_connected(timeout=10)
    if "id_str=foo" not in ev:
        raise Exception("CTRL-EVENT-CONNECT did not have matching id_str: " + ev)
    if dev[0].get_status_field("id_str") != "foo":
        raise Exception("id_str mismatch")
def test_ap_open_select_any(dev, apdev):
    """AP with open mode and select any network"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    # Add two networks without connecting: one that cannot match ("unknown")
    # and one that can ("open").
    id = dev[0].connect("unknown", key_mgmt="NONE", scan_freq="2412",
                        only_add_network=True)
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   only_add_network=True)
    # Selecting the non-matching network must not result in a connection.
    dev[0].select_network(id)
    ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected connection")
    # SELECT_NETWORK any re-enables all networks and connects to "open".
    dev[0].select_network("any")
    dev[0].wait_connected(timeout=10)
def test_ap_open_unexpected_assoc_event(dev, apdev):
    """AP with open mode and unexpected association event"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=15)
    dev[0].dump_monitor()
    # Trigger association from the kernel directly (bypassing wpa_supplicant).
    # This will be accepted due to matching network
    subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
                     apdev[0]['bssid']])
    dev[0].wait_connected(timeout=15)
    dev[0].dump_monitor()
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected(timeout=5)
    dev[0].dump_monitor()
    # This will result in disconnection due to no matching network
    subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
                     apdev[0]['bssid']])
    dev[0].wait_disconnected(timeout=15)
def test_ap_bss_load(dev, apdev):
    """AP with open mode (no security) configuration"""
    # bss_load_update_period enables periodic BSS Load element updates.
    hapd = hostapd.add_ap(apdev[0]['ifname'],
                          { "ssid": "open",
                            "bss_load_update_period": "10" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    # this does not really get much useful output with mac80211_hwsim currently,
    # but run through the channel survey update couple of times
    for i in range(0, 10):
        hwsim_utils.test_connectivity(dev[0], hapd)
        hwsim_utils.test_connectivity(dev[0], hapd)
        hwsim_utils.test_connectivity(dev[0], hapd)
        time.sleep(0.15)
def hapd_out_of_mem(hapd, apdev, count, func):
    """Verify that hostapd interface setup fails cleanly when the count-th
    allocation inside func is forced to fail.

    Fix: the failure message previously referenced an undefined name `arg`,
    which raised NameError instead of the intended Exception whenever the
    interface unexpectedly started.
    """
    with alloc_fail(hapd, count, func):
        started = False
        try:
            hostapd.add_ap(apdev['ifname'], { "ssid": "open" })
            started = True
        except:
            # add_ap failing is the expected outcome under alloc_fail.
            pass
        if started:
            raise Exception("hostapd interface started even with memory allocation failure: %d:%s" % (count, func))
def test_ap_open_out_of_memory(dev, apdev):
    """hostapd failing to setup interface due to allocation failure"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    # Walk through the allocation sites hit during interface setup, failing
    # each one in turn on the second AP interface.
    hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_alloc_bss_data")
    for i in range(1, 3):
        hapd_out_of_mem(hapd, apdev[1], i, "hostapd_iface_alloc")
    for i in range(1, 5):
        hapd_out_of_mem(hapd, apdev[1], i, "hostapd_config_defaults;hostapd_config_alloc")
    hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_config_alloc")
    hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_driver_init")
    for i in range(1, 4):
        hapd_out_of_mem(hapd, apdev[1], i, "=wpa_driver_nl80211_drv_init")
    # eloop_register_read_sock() call from i802_init()
    hapd_out_of_mem(hapd, apdev[1], 1, "eloop_sock_table_add_sock;eloop_register_sock;?eloop_register_read_sock;=i802_init")
    # verify that a new interface can still be added when memory allocation does
    # not fail
    hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
def test_bssid_black_white_list(dev, apdev):
    """BSSID black/white list

    Fix: corrected typo "connectin" -> "connection" in an exception message.
    """
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
    # Whitelist forces dev0 to the second AP; blacklist forces dev1 away
    # from it; dev2's whitelist mask matches everything, so the blacklist
    # decides.
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_whitelist=apdev[1]['bssid'])
    dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_blacklist=apdev[1]['bssid'])
    dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_whitelist="00:00:00:00:00:00/00:00:00:00:00:00",
                   bssid_blacklist=apdev[1]['bssid'])
    if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
        raise Exception("dev[0] connected to unexpected AP")
    if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
        raise Exception("dev[1] connected to unexpected AP")
    if dev[2].get_status_field('bssid') != apdev[0]['bssid']:
        raise Exception("dev[2] connected to unexpected AP")
    dev[0].request("REMOVE_NETWORK all")
    dev[1].request("REMOVE_NETWORK all")
    dev[2].request("REMOVE_NETWORK all")
    # An all-zero whitelist entry (no mask) matches nothing, so dev2 must
    # not be able to connect at all.
    dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_whitelist="00:00:00:00:00:00", wait_connect=False)
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_whitelist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
    dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bssid_blacklist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
    if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
        raise Exception("dev[0] connected to unexpected AP")
    if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
        raise Exception("dev[1] connected to unexpected AP")
    dev[0].request("REMOVE_NETWORK all")
    dev[1].request("REMOVE_NETWORK all")
    ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
    if ev is not None:
        raise Exception("Unexpected dev[2] connection")
    dev[2].request("REMOVE_NETWORK all")
def test_ap_open_wpas_in_bridge(dev, apdev):
    """Open mode AP and wpas interface in a bridge"""
    br_ifname='sta-br0'
    ifname='wlan5'
    try:
        _test_ap_open_wpas_in_bridge(dev, apdev)
    finally:
        # Best-effort cleanup: tear down the bridge and 4addr mode even if
        # the test body failed part-way through.
        subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'down'])
        subprocess.call(['brctl', 'delif', br_ifname, ifname])
        subprocess.call(['brctl', 'delbr', br_ifname])
        subprocess.call(['iw', ifname, 'set', '4addr', 'off'])
def _test_ap_open_wpas_in_bridge(dev, apdev):
    """Body of test_ap_open_wpas_in_bridge (cleanup handled by the caller)."""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    br_ifname='sta-br0'
    ifname='wlan5'
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # First, try a failure case of adding an interface
    try:
        wpas.interface_add(ifname, br_ifname=br_ifname)
        raise Exception("Interface addition succeeded unexpectedly")
    # NOTE(review): Python 2-only except syntax; kept as-is since this file
    # predates Python 3 support.
    except Exception, e:
        if "Failed to add" in str(e):
            logger.info("Ignore expected interface_add failure due to missing bridge interface: " + str(e))
        else:
            raise
    # Next, add the bridge interface and add the interface again
    subprocess.call(['brctl', 'addbr', br_ifname])
    subprocess.call(['brctl', 'setfd', br_ifname, '0'])
    subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'up'])
    # 4addr (WDS) mode is required for a STA interface to live in a bridge.
    subprocess.call(['iw', ifname, 'set', '4addr', 'on'])
    subprocess.check_call(['brctl', 'addif', br_ifname, ifname])
    wpas.interface_add(ifname, br_ifname=br_ifname)
    wpas.connect("open", key_mgmt="NONE", scan_freq="2412")
def test_ap_open_start_disabled(dev, apdev):
    """AP with open mode and beaconing disabled"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
                                                "start_disabled": "1" })
    bssid = apdev[0]['bssid']
    dev[0].flush_scan_cache()
    # With start_disabled=1, the AP must not beacon until RELOAD.
    dev[0].scan(freq=2412, only_new=True)
    if dev[0].get_bss(bssid) is not None:
        raise Exception("AP was seen beaconing")
    if "OK" not in hapd.request("RELOAD"):
        raise Exception("RELOAD failed")
    dev[0].scan_for_bss(bssid, freq=2412)
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
def test_ap_open_start_disabled2(dev, apdev):
    """AP with open mode and beaconing disabled (2)"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
                                                "start_disabled": "1" })
    bssid = apdev[0]['bssid']
    dev[0].flush_scan_cache()
    # With start_disabled=1, the AP must not beacon until UPDATE_BEACON.
    dev[0].scan(freq=2412, only_new=True)
    if dev[0].get_bss(bssid) is not None:
        raise Exception("AP was seen beaconing")
    if "OK" not in hapd.request("UPDATE_BEACON"):
        raise Exception("UPDATE_BEACON failed")
    dev[0].scan_for_bss(bssid, freq=2412)
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    # A second UPDATE_BEACON while already beaconing must also succeed and
    # leave the connection usable after a reconnect.
    if "OK" not in hapd.request("UPDATE_BEACON"):
        raise Exception("UPDATE_BEACON failed")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected()
    dev[0].request("RECONNECT")
    dev[0].wait_connected()
def test_ap_open_ifdown(dev, apdev):
    """AP with open mode and external ifconfig down"""
    params = { "ssid": "open",
               "ap_max_inactivity": "1" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    bssid = apdev[0]['bssid']
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    dev[1].connect("open", key_mgmt="NONE", scan_freq="2412")
    # Taking the AP interface down externally should make hostapd drop both
    # stations and report the interface as disabled.
    subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'down'])
    ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
    if ev is None:
        raise Exception("Timeout on AP-STA-DISCONNECTED (1)")
    ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=5)
    if ev is None:
        raise Exception("Timeout on AP-STA-DISCONNECTED (2)")
    ev = hapd.wait_event(["INTERFACE-DISABLED"], timeout=5)
    if ev is None:
        raise Exception("No INTERFACE-DISABLED event")
    # The following wait tests beacon loss detection in mac80211 on dev0.
    # dev1 is used to test stopping of AP side functionality on client polling.
    dev[1].request("REMOVE_NETWORK all")
    subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'up'])
    dev[0].wait_disconnected()
    dev[1].wait_disconnected()
    ev = hapd.wait_event(["INTERFACE-ENABLED"], timeout=10)
    if ev is None:
        raise Exception("No INTERFACE-ENABLED event")
    # dev0 should reconnect on its own once the AP is back.
    dev[0].wait_connected()
    hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_open_disconnect_in_ps(dev, apdev, params):
    """Disconnect with the client in PS to regression-test a kernel bug"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bg_scan_period="0")
    ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
    if ev is None:
        raise Exception("No connection event received from hostapd")
    time.sleep(0.2)
    # Put the station into manual-poll power save so frames get buffered.
    hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_MANUAL_POLL)
    try:
        # inject some traffic
        sa = hapd.own_addr()
        da = dev[0].own_addr()
        hapd.request('DATA_TEST_CONFIG 1')
        hapd.request('DATA_TEST_TX {} {} 0'.format(da, sa))
        hapd.request('DATA_TEST_CONFIG 0')
        # let the AP send couple of Beacon frames
        time.sleep(0.3)
        # disconnect - with traffic pending - shouldn't cause kernel warnings
        dev[0].request("DISCONNECT")
    finally:
        hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_DISABLED)
    time.sleep(0.2)
    # Verify from the capture that the TIM bit was set (frames buffered)
    # and later cleared again.
    out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
                     "wlan_mgt.tim.partial_virtual_bitmap",
                     ["wlan_mgt.tim.partial_virtual_bitmap"])
    if out is not None:
        state = 0
        for l in out.splitlines():
            pvb = int(l, 16)
            if pvb > 0 and state == 0:
                state = 1
            elif pvb == 0 and state == 1:
                state = 2
        if state != 2:
            raise Exception("Didn't observe TIM bit getting set and unset (state=%d)" % state)
def test_ap_open_select_network(dev, apdev):
    """Open mode connection and SELECT_NETWORK to change network"""
    hapd1 = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    bssid1 = apdev[0]['bssid']
    hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open2" })
    bssid2 = apdev[1]['bssid']
    id1 = dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                         only_add_network=True)
    id2 = dev[0].connect("open2", key_mgmt="NONE", scan_freq="2412")
    hwsim_utils.test_connectivity(dev[0], hapd2)
    # Switching networks via SELECT_NETWORK must not blacklist either BSS.
    dev[0].select_network(id1)
    dev[0].wait_connected()
    res = dev[0].request("BLACKLIST")
    if bssid1 in res or bssid2 in res:
        raise Exception("Unexpected blacklist entry")
    hwsim_utils.test_connectivity(dev[0], hapd1)
    dev[0].select_network(id2)
    dev[0].wait_connected()
    hwsim_utils.test_connectivity(dev[0], hapd2)
    res = dev[0].request("BLACKLIST")
    if bssid1 in res or bssid2 in res:
        raise Exception("Unexpected blacklist entry(2)")
def test_ap_open_disable_enable(dev, apdev):
    """AP with open mode getting disabled and re-enabled"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
                   bg_scan_period="0")
    # Toggle the AP twice; the station must ride through each cycle and end
    # up with a working data path.
    for i in range(2):
        hapd.request("DISABLE")
        dev[0].wait_disconnected()
        hapd.request("ENABLE")
        dev[0].wait_connected()
        hwsim_utils.test_connectivity(dev[0], hapd)
| |
import time
import sys
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, UnexpectedAlertPresentException
from selenium.common.exceptions import ElementClickInterceptedException
from knitter.configure import Browser
from knitter import logger
class CompatibleMethod(object):
    """Descriptor that binds a method to whatever it is looked up on.

    Accessed through the class, the wrapped function receives the class as
    its first argument (like ``@classmethod``); accessed through an
    instance, it receives that instance instead, so state stays
    per-instance. This is an extended ``@classmethod`` used to avoid
    shared-state problems in multi-threaded runs of the framework.

    Example::

        class A:
            a = 0

            @CompatibleMethod
            def test(self, aaa):
                self.a = aaa

            @CompatibleMethod
            def get(self):
                print("get ", self.a)
    """

    def __init__(self, method):
        self._method = method

    def __get__(self, obj, klass=None):
        target = type(obj) if klass is None else klass
        # This is where it differs from classmethod: prefer binding to the
        # instance whenever one is available.
        if isinstance(obj, target):
            target = obj
        target.__name__ = target.__class__.__name__

        def bound(*args, **kws):
            return self._method(target, *args, **kws)
        return bound
class WebBrowser:
    """Browser-level operations: alerts, navigation, scrolling and
    window/frame switching.

    All methods act on the shared ``Browser.RunningBrowser`` driver and are
    declared with @CompatibleMethod so they can be used through the class
    or an instance.
    """

    @CompatibleMethod
    def AlertAccept(self):
        """Accept (click OK on) a JavaScript alert if one is present."""
        logger.step_normal("AlertAccept()")
        time.sleep(2)
        try:
            logger.step_normal("switch_to_alert()")
            alert = Browser.RunningBrowser.switch_to_alert()
            alert.accept()
        except NoAlertPresentException:
            logger.step_normal("Alert Not Found. ")
        try:
            logger.step_normal("switch_to_default_content()")
            Browser.RunningBrowser.switch_to_default_content()
        except Exception as e:
            logger.step_warning(e)
            pass

    @CompatibleMethod
    def AlertDismiss(self):
        """Dismiss (click Cancel on) a JavaScript alert if one is present."""
        logger.step_normal("AlertDismiss()")
        time.sleep(2)
        try:
            logger.step_normal("switch_to_alert()")
            alert = Browser.RunningBrowser.switch_to_alert()
            alert.dismiss()
        except NoAlertPresentException:
            logger.step_normal("Alert Not Found.")
        try:
            logger.step_normal("switch_to_default_content()")
            Browser.RunningBrowser.switch_to_default_content()
        except Exception as e:
            logger.step_normal(e)
            pass

    @CompatibleMethod
    def AlertSendKeys(self, value):
        """Type `value` into the current alert's prompt field."""
        logger.step_normal("AlertSendKeys [%s]" % value)
        try:
            Browser.RunningBrowser.switch_to.alert.send_keys(value)
            Browser.RunningBrowser.switch_to.default_content()
        except Exception as e:
            logger.step_normal(e)
            logger.step_warning(str(sys.exc_info()))

    @CompatibleMethod
    def AlertTextHave(self, txt_value):
        """Check that the current alert's text contains `txt_value`.

        Fix: selenium's Alert.text is a property, not a method; the
        previous ``.text()`` call always raised TypeError before the
        comparison could run.
        """
        logger.step_normal("AlertTextHave [%s]" % txt_value)
        alert_text = Browser.RunningBrowser.switch_to_alert().text
        if txt_value in alert_text:
            logger.step_pass("pass")
        else:
            logger.step_fail("fail")
        Browser.RunningBrowser.switch_to_default_content()

    @CompatibleMethod
    def DeleteAllCookies(self):
        """Clear all cookies for the current browser session."""
        logger.step_normal("Element [%s]: Browser Delete All Cookies" % (self.__name__,))
        Browser.RunningBrowser.delete_all_cookies()
        time.sleep(3)

    @CompatibleMethod
    def IESkipCertError(self):
        """Click through Internet Explorer's SSL certificate error page."""
        logger.step_normal("IE Skip SSL Cert Error.")
        Browser.RunningBrowser.get("javascript:document.getElementById('overridelink').click();")

    @CompatibleMethod
    def NavigateTo(self, url):
        """Open `url` in the running browser."""
        logger.step_normal("Element [%s]: Navigate To [%s]" % (self.__name__, url))
        Browser.RunningBrowser.get(url)
        time.sleep(3)

    @CompatibleMethod
    def Refresh(self, times=4):
        """Hard-refresh the page (Ctrl+F5) `times` times."""
        logger.step_normal("Element [%s]: Browser Refresh" % (self.__name__,))
        for i in range(times):
            action = webdriver.ActionChains(Browser.RunningBrowser)
            action.key_down(Keys.CONTROL).send_keys(Keys.F5).key_up(Keys.CONTROL).perform()
            time.sleep(5)

    @CompatibleMethod
    def ScrollTo(self, x, y):
        """Scroll the window to absolute coordinates (x, y)."""
        logger.step_normal("Element [%s]: Scroll To [%s, %s]" % (self.__name__, x, y))
        Browser.RunningBrowser.execute_script("window.scrollTo(%s, %s);" % (x, y))

    @CompatibleMethod
    def SwitchToDefaultContent(self):
        """Switch focus back to the top-level document (out of any frame)."""
        logger.step_normal("SwitchToDefaultContent()")
        try:
            Browser.RunningBrowser.switch_to.default_content()
        except Exception as e:
            logger.step_normal(e)
            logger.step_warning("Browser.RunningBrowser.switch_to.default_content()")

    @CompatibleMethod
    def SwitchToDefaultWindow(self):
        """Switch focus to the first (original) browser window."""
        logger.step_normal("SwitchToDefaultWindow()")
        logger.step_normal("Switch To The Default Window of: %s" % str(Browser.RunningBrowser.window_handles))
        try:
            Browser.RunningBrowser.switch_to.window(Browser.RunningBrowser.window_handles[0])
        except Exception as e:
            logger.step_normal(e)
            logger.step_warning("Browser.RunningBrowser.switch_to.window(Browser.RunningBrowser.window_handles[0])")

    @CompatibleMethod
    def SwitchToFrame(self, frame):
        """Switch focus into the given frame (name, index or element)."""
        logger.step_normal("SwitchToFrame()")
        Browser.RunningBrowser.switch_to.frame(frame)

    @CompatibleMethod
    def SwitchToNewPopWindow(self):
        """Wait (up to ~30s) for a popup window and switch to the newest one."""
        logger.step_normal("SwitchToNewPopWindow()")
        t = 0
        while t < 10:
            t = t + 1
            time.sleep(3)
            if len(Browser.RunningBrowser.window_handles) < 2:
                logger.step_normal("Pop Window Not Found. Wait 3 Seconds then Try Again!")
            else:
                break
        # Fall through after the timeout: switching to the last handle is a
        # no-op when no popup ever appeared.
        Browser.RunningBrowser.switch_to.window(Browser.RunningBrowser.window_handles[-1])
        logger.step_normal("Switch To The New Window of : %s" % str(Browser.RunningBrowser.window_handles))

    @CompatibleMethod
    def Wait(self, seconds):
        """Sleep for `seconds` seconds (explicit scripted wait)."""
        logger.step_normal("Element [%s]: Wait for [%s] seconds." % (self.__name__, seconds))
        time.sleep(seconds)
class WebElement:
    # Locator strategy and expression (selenium By-style pair); set either as
    # class attributes on a subclass or via __init__.
    (by, value) = (None, None)
    # When the locator matches several elements, methods operate on this index.
    index = 0
    @CompatibleMethod
    def __init__(self, by=None, value=None):
        self.by = by
        self.value = value
@CompatibleMethod
def __wait(self):
t = 0
while t < 300:
t = t + 1
try:
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
except NoSuchElementException:
logger.step_normal("Element [%s]: NoSuchElementException." % self.__name__)
elements = []
except UnexpectedAlertPresentException:
logger.step_warning("Element [%s]: UnexpectedAlertPresentException." % self.__name__)
if len(elements) == 0:
time.sleep(1)
logger.step_normal("Element [%s]: Wait 1 second, By [%s :: %s :: %s]" % (self.__name__,
self.by,
self.value,
self.index))
else:
if len(elements) > 1:
logger.step_normal("Element [%s]: There are [%s] Elements!" % (self.__name__, len(elements)))
break
if len(elements) < self.index + 1:
logger.step_fail("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (self.__name__,
len(elements),
self.index))
@CompatibleMethod
def __wait_for_appearing(self):
t = 0
while t < 120:
t = t + 1
try:
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
except NoSuchElementException:
logger.step_normal("Element [%s]: NoSuchElementException." % self.__name__)
elements = []
continue
except UnexpectedAlertPresentException:
logger.step_warning("Element [%s]: UnexpectedAlertPresentException." % self.__name__)
if len(elements) == 0:
time.sleep(0.5)
logger.step_normal("Element [%s]: WaitForAppearing... Wait 1 second, By [%s]" % (self.__name__,
self.value))
else:
logger.step_normal("Element [%s]: Found [%s] Element. Tried [%s] Times." % (self.__name__,
len(elements), t))
break
@CompatibleMethod
def __wait_for_disappearing(self):
t = 0
while t < 120:
t = t + 1
try:
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
except NoSuchElementException:
logger.step_normal("Element [%s]: NoSuchElementException." % self.__name__)
elements = []
continue
except UnexpectedAlertPresentException:
logger.step_warning("Element [%s]: UnexpectedAlertPresentException." % self.__name__)
if len(elements) == 0:
return True
else:
time.sleep(0.5)
logger.step_normal("Element [%s]: WairForDisappearing... Found [%s] Element. Tried [%s] Times." %
(self.__name__, len(elements), t))
return False
@CompatibleMethod
def __wait_for_enabled(self):
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
if elements[self.index].is_enabled():
return
else:
t = 0
while t < 90:
if elements[self.index].is_enabled():
break
logger.step_normal("Element [%s]: __wait_for_enabled for 1 second, By [%s :: %s :: %s]" %
(self.__name__, self.by, self.value, self.index))
time.sleep(1)
    @CompatibleMethod
    def Click(self):
        """Click the matched element, falling back to an ActionChains click
        if another element intercepts the direct click."""
        logger.step_normal("Element [%s]: Click()" % self.__name__)
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        try:
            elements[self.index].click()
        except ElementClickInterceptedException:
            # Give overlays a moment to clear, then click via ActionChains.
            self.Wait(3)
            action = webdriver.ActionChains(Browser.RunningBrowser)
            action.click(elements[self.index])
            action.perform()
    @CompatibleMethod
    def ClickAndHold(self):
        """Press and hold the left mouse button on the matched element."""
        logger.step_normal("Element [%s]: ClickAndHold()" % self.__name__)
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        action = webdriver.ActionChains(Browser.RunningBrowser)
        action.click_and_hold(elements[self.index])
        action.perform()
    @CompatibleMethod
    def DoubleClick(self):
        """Double-click the matched element."""
        logger.step_normal("Element [%s]: DoubleClick()" % self.__name__)
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        action = webdriver.ActionChains(Browser.RunningBrowser)
        action.double_click(elements[self.index])
        action.perform()
    @CompatibleMethod
    def DragAndDropByOffset(self, xoffset, yoffset):
        """
        Holds down the left mouse button on the source element,
        then moves to the target offset and releases the mouse button.
        """
        logger.step_normal("Element [%s]: drag_and_drop_by_offset()" % self.__name__)
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        action = webdriver.ActionChains(Browser.RunningBrowser)
        action.drag_and_drop_by_offset(elements[self.index],xoffset, yoffset)
        action.perform()
    @CompatibleMethod
    def FetchSubElementOfXPath(self, layer):
        # Build a new WebElement whose XPath is this element's path truncated
        # to `layer` steps, i.e. an ancestor of the current locator.
        # NOTE(review): only meaningful when self.by is an XPath locator.
        return WebElement(self.by, "/".join(self.value.split("/")[:layer+2]))
    @CompatibleMethod
    def GetAttribute(self, attr):
        """Return the value of the matched element's attribute `attr`."""
        logger.step_normal("Element [%s]: GetAttribute [%s]." % (self.__name__, attr))
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        attr_value = elements[self.index].get_attribute(attr)
        logger.step_normal("Element [%s]: [%s] = [%s]." % (self.__name__, attr, attr_value))
        return attr_value
@CompatibleMethod
def GetInnerHTML(self):
logger.step_normal("Element [%s]: GetInnerHTML()" % self.__name__)
self.__wait()
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
logger.step_normal("Element [%s]: InnerHTML = [%s]" % (self.__name__, elements[self.index].get_attribute('innerHTML')))
return elements[self.index].get_attribute('innerHTML')
@CompatibleMethod
def GetParentElement(self):
logger.step_normal("Element [%s]: GetParentElement()" % self.__name__)
self.__wait()
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
return elements[self.index].parent()
    @CompatibleMethod
    def GetRepetition(self):
        """Return how many elements match the locator, after waiting for at
        least one to appear."""
        logger.step_normal("Element [%s]: GetRepetition()." % self.__name__)
        self.__wait_for_appearing()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        logger.step_normal("Element [%s]: repetition = [%s]" % (self.__name__, len(elements)))
        return len(elements)
    @CompatibleMethod
    def GetRepetitionWithoutWaiting(self):
        """ Get real time obj counts, without waiting."""
        logger.step_normal("Element [%s]: GetRepetitionWithoutWaiting()." % self.__name__)
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        logger.step_normal("Element [%s]: repetition = [%s]" % (self.__name__, len(elements)))
        return len(elements)
@CompatibleMethod
def GetText(self):
elements = Browser.RunningBrowser.find_elements(self.by, self.value)
logger.step_normal("Element [%s]: Get text of the element = %s." % (self.__name__,
elements[self.index].text))
return elements[self.index].text
@CompatibleMethod
def IsAttribute(self, attribute, value, assertion="contain"):
    """Return True when the element's *attribute* satisfies *assertion*
    against *value*.

    Supported assertions: 'equal', 'not equal', 'contain',
    'not contain', 'in' (real value contained in *value*).

    Bug fix: the original folded the comparison result into the
    assertion dispatch chain, so any legitimately-false comparison
    (e.g. assertion='equal' with unequal values) fell through to the
    "code error." failure branch.  Dispatch and comparison are now
    separated.
    """
    logger.step_normal("Element [%s]: IsAttribute [%s] <%s> [%s]?" % (self.__name__, attribute, assertion, value))
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    real_value = elements[self.index].get_attribute(attribute)
    checks = {
        'equal': lambda real: value == real,
        'not equal': lambda real: value != real,
        'contain': lambda real: value in real,
        'not contain': lambda real: value not in real,
        'in': lambda real: real in value,
    }
    check = checks.get(assertion.lower())
    if check is None:
        # Unknown assertion keyword: genuine caller error.
        logger.step_fail("code error.")
        return False
    result = check(real_value)
    if result is True:
        logger.step_normal("Yes!")
    else:
        logger.step_normal("No!")
    return result
@CompatibleMethod
def IsEnabled(self):
    """Return True when the matched element is enabled."""
    logger.step_normal("Element [%s]: Is Enabled?" % self.__name__)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    enabled = elements[self.index].is_enabled()
    logger.step_normal("Yes!" if enabled else "No!")
    return enabled
@CompatibleMethod
def IsExist(self):
    """Return True when at least one matching element exists.

    Uses a fixed 2-second settle delay instead of the usual wait loop.
    """
    logger.step_normal("Element [%s]: IsExist?" % self.__name__)
    time.sleep(2)
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    logger.step_normal("Element [%s]: IsExist? Count = [%s]" % (self.__name__, len(elements)))
    return len(elements) > 0
@CompatibleMethod
def IsVisible(self):
    """Return True when the matched element is displayed."""
    logger.step_normal("Element [%s]: IsVisible?" % self.__name__)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    visible = elements[self.index].is_displayed()
    logger.step_normal("Yes!" if visible else "No!")
    return visible
@CompatibleMethod
def MouseOver(self):
    """Hover the mouse pointer over the matched element."""
    logger.step_normal("Element [%s]: MouseOver()" % self.__name__)
    time.sleep(1)
    self.__wait()
    target = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    chain = webdriver.ActionChains(Browser.RunningBrowser)
    chain.move_to_element(target).perform()
    # Give hover-triggered UI (tooltips, menus) a moment to render.
    time.sleep(1)
@CompatibleMethod
def ReleaseClick(self):
    """Release a held mouse button over the matched element."""
    logger.step_normal("Element [%s]: ReleaseClick()" % self.__name__)
    self.__wait()
    target = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    chain = webdriver.ActionChains(Browser.RunningBrowser)
    chain.release(target).perform()
@CompatibleMethod
def ScrollIntoView(self):
    """Scroll the page down (10px steps) until the element is displayed.

    Bug fix: the original logged "break out." when the 1000px limit was
    reached but contained no ``break`` statement, so a permanently
    hidden element made the loop spin forever.
    """
    logger.step_normal("Element [%s]: ScrollToView()" % self.__name__)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    offset = 0
    while not elements[self.index].is_displayed():
        WebBrowser.ScrollTo(0, offset)
        offset = offset + 10
        if offset >= 1000:
            # Give up after scrolling 1000px: the element is likely
            # hidden for a reason other than being off-screen.
            logger.step_normal("still not displayed. break out.")
            break
@CompatibleMethod
def Select(self, value):
    """Select the child whose text exactly equals *value*.

    Supports <select> containers (clicks the matching <option>) and
    <ul> containers (clicks the matching <li>).  Fails the step for any
    other tag or when nothing matched.
    """
    logger.step_normal("Element [%s]: Select [%s]." % (self.__name__, value))
    self.__wait()
    container = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    tag = container.tag_name
    if tag == "select":
        candidates = container.find_elements_by_tag_name('option')
        child_label = "option"
    elif tag == "ul":
        candidates = container.find_elements_by_tag_name('li')
        child_label = "li"
    else:
        logger.step_fail("Element [%s]: Tag [%s] NOT support [Select] method" % (self.__name__, tag))
        candidates = []
        child_label = ""
    is_selected = False
    for item in candidates:
        logger.step_normal("Element [%s]: %s [%s]" % (self.__name__, child_label, item.text))
        if item.text == value:
            item.click()
            is_selected = True
            break
    if is_selected is False:
        logger.step_fail("No item selected!")
@CompatibleMethod
def SelectByOrder(self, order):
    """Click the *order*-th (1-based) child of the container.

    For a <ul> container the <li> children are used; for anything else
    the <option> children are used (the container is assumed to be a
    <select>).  While the container has no children yet, re-reads them
    every 3 seconds, up to 20 attempts, before failing the step.  Also
    fails when *order* is out of range or < 1.
    """
    logger.step_normal("Element [%s]: Select by Order [%s]" % (self.__name__, order))
    order = int(order)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    # ul
    if elements[self.index].tag_name == "ul":
        lis = elements[self.index].find_elements_by_tag_name('li')
        if order > 0:
            # wait and try more times if NO item found.
            t = 0
            while len(lis) == 0:
                lis = elements[self.index].find_elements_by_tag_name('li')
                time.sleep(3)
                t = t + 1
                logger.step_normal("Element [%s]: Wait 3 Seconds for [li]" % self.__name__)
                # Give up after 20 polls (~60s) with still no <li> children.
                if t == 20 and len(lis) == 0:
                    logger.step_fail("Element [%s]: List Count = [%s]." % (self.__name__, len(lis)))
                    return
            logger.step_normal("Element [%s]: List Count = [%s]." % (self.__name__, len(lis)))
            if order > len(lis):
                logger.step_fail("Element [%s]: Not so many lists. [%s]" % (self.__name__, len(lis)))
            else:
                action = webdriver.ActionChains(Browser.RunningBrowser)
                # Added to avoid error: "Element is no longer attached to the DOM"
                elements = Browser.RunningBrowser.find_elements(self.by, self.value)
                logger.step_normal("Element [%s]: Do Click [%s]" % (self.__name__, lis[order-1].text))
                lis[order-1].click()
                """
                lis = elements[self.index].find_elements_by_tag_name('li')
                action.click(lis[order-1])
                action.perform()
                """
        else:
            logger.step_fail("Order = [%s], Value Error." % order)
    # select
    # if elements[self.index].tag_name == "select":
    else:
        options = elements[self.index].find_elements_by_tag_name('option')
        if order > 0:
            # wait and try more times if NO item found.
            t = 0
            while len(options) == 0:
                options = elements[self.index].find_elements_by_tag_name('option')
                time.sleep(3)
                t = t + 1
                logger.step_normal("Element [%s]: Wait 3 Seconds for [option]" % self.__name__)
                # Give up after 20 polls (~60s) with still no <option> children.
                if t == 20 and len(options) == 0:
                    logger.step_fail("Element [%s]: options Count = [%s]." % (self.__name__, len(options)))
                    return
            logger.step_normal("Element [%s]: options Count = [%s]." % (self.__name__, len(options)))
            if order > len(options):
                logger.step_fail("Element [%s]: Not so many options. [%s]" % (self.__name__, len(options)))
            else:
                logger.step_normal("Element [%s]: Do Click [%s]" % (self.__name__, options[order-1].text))
                options[order-1].click()
                """
                action = webdriver.ActionChains(Browser.RunningBrowser)
                action.click()
                action.perform()
                """
        else:
            logger.step_fail("Order = [%s], Value Error." % order)
@CompatibleMethod
def SelectByPartText(self, value):
    """Select the first child whose text contains *value*.

    Supports <select> (clicks the matching <option>) and <ul> (clicks
    the matching <li>); fails the step for other tags or no match.
    """
    logger.step_normal("Element [%s]: Select [%s]." % (self.__name__, value))
    self.__wait()
    container = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    tag = container.tag_name
    if tag == "select":
        candidates = container.find_elements_by_tag_name('option')
    elif tag == "ul":
        candidates = container.find_elements_by_tag_name('li')
    else:
        logger.step_fail("Element [%s]: Tag [%s] NOT support [Select] method" % (self.__name__, tag))
        candidates = []
    is_selected = False
    for item in candidates:
        if value in item.text:
            logger.step_normal("Element [%s]: Select [%s]." % (self.__name__, item.text))
            item.click()
            is_selected = True
            break
    if is_selected is False:
        logger.step_fail("No item selected!")
@CompatibleMethod
def SendEnter(self):
    """Send an ENTER keystroke to the matched element."""
    logger.step_normal("Element [%s]: SendEnter()" % self.__name__, )
    self.__wait()
    target = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    chain = webdriver.ActionChains(Browser.RunningBrowser)
    chain.send_keys_to_element(target, Keys.ENTER).perform()
@CompatibleMethod
def Set(self, value):
    """Set the element's value.

    For <select>/<ul> containers this delegates to Select(); for plain
    inputs it clears existing text and types *value*.
    """
    logger.step_normal("Element [%s]: Set [%s]." % (self.__name__, value))
    value = str(value)
    self.__wait()
    target = Browser.RunningBrowser.find_elements(self.by, self.value)[self.index]
    if target.tag_name in ("select", "ul"):
        self.Select(value)
    else:
        target.clear()
        target.send_keys(value)
@CompatibleMethod
def TypeInWithoutClear(self, value):
    """Append *value* to the element without clearing existing text.

    Bug fix: the log line was ``"...%s...%s." % self.__name__, value``;
    ``%`` binds tighter than the comma, so the format string with two
    placeholders received a single argument and raised TypeError.  Both
    values now go into the format tuple.
    """
    logger.step_normal("Element [%s]: TypeInWithoutClear [%s]." % (self.__name__, value))
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    elements[self.index].send_keys(value)
@CompatibleMethod
def VerifyAttribute(self, attribute, value, assertion='equal'):
    """Pass or fail the step by comparing the element's *attribute*
    against *value* using *assertion*.

    Example:
        NewClaim.Dates.ReminderDate.VerifyAttribute('ng-model', 'hello', assertion='equal')
        NewClaim.Dates.ReminderDate.VerifyAttribute('ng-model', 'hello', assertion='contain')
        NewClaim.Dates.ReminderDate.VerifyAttribute('ng-model', 'hello', assertion='in')
        NewClaim.Dates.ReminderDate.VerifyAttribute('ng-model', 'hello', assertion='not equal')
    :param assertion:
        in          => Real value [in] 'hello'.          For example: real_value is 'he'
        equal       => Real value [equal] 'hello'.       For example: real_value is 'hello'
        contain     => Real value [contain] 'hello'.     For example: real_value is 'hello world'.
        not equal   => Real value [not equal] 'hello'.   For example: real_value is 'hallow'
        not contain => Real value [not contain] 'hello'. For example: real_value is 'hi world'.
    """
    logger.step_normal("Element [%s]: VerifyAttribute [%s] <%s> [%s]." % (self.__name__, attribute, assertion, value))
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    real_value = elements[self.index].get_attribute(attribute)
    # Each assertion maps to a zero-argument predicate over real_value.
    predicates = {
        'equal': lambda: value == real_value,
        'not equal': lambda: value != real_value,
        'contain': lambda: value in real_value,
        'not contain': lambda: value not in real_value,
        'in': lambda: real_value in value,
    }
    predicate = predicates.get(assertion.lower())
    if predicate is None:
        logger.step_fail("code error.")
    elif predicate():
        logger.step_pass("real value=[%s]" % real_value)
    else:
        logger.step_fail("real value=[%s]" % real_value)
@CompatibleMethod
def VerifyEnabled(self, trueOrfalse):
    """Pass or fail the step by checking the element's enabled state
    against the expected *trueOrfalse*.

    An explicit disabled="true" attribute wins over is_enabled().
    """
    logger.step_normal("Element [%s]: Verify Enabled = [%s]" % (self.__name__, trueOrfalse))
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    is_disabled = elements[self.index].get_attribute("disabled")
    logger.step_normal("Element [%s]: attribute 'is_disabled' = [%s]" % (self.__name__, is_disabled))
    if is_disabled == "true":
        if trueOrfalse is False:
            logger.step_pass("Pass...")
        else:
            logger.step_fail("Fail...")
        return
    if elements[self.index].is_enabled():
        if trueOrfalse is True:
            logger.step_pass("Pass")
        else:
            logger.step_fail("Fail")
        return
    logger.step_fail("Not verified.")
@CompatibleMethod
def VerifyExistence(self, trueORfalse):
    """Pass or fail the step by checking whether the element exists.

    EXAMPLE
        Page.Element.VerifyExistence(True)
    :param trueORfalse: True or False
    :return:
    """
    logger.step_normal("Element [%s]: Verify Existence = [%s]." % (self.__name__, trueORfalse))
    # Wait in the direction of the expectation before counting.
    if trueORfalse is True:
        self.__wait_for_appearing()
    else:
        self.__wait_for_disappearing()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    logger.step_normal("Element [%s]: Count = [%s]" % (self.__name__, len(elements)))
    if len(elements) > 0:
        (logger.step_pass if trueORfalse is True else logger.step_fail)("Exist!")
    else:
        (logger.step_pass if trueORfalse is False else logger.step_fail)("Not Exist!")
@CompatibleMethod
def VerifyInnerHTMLContains(self, content):
    """Pass or fail the step by checking the element's innerHTML
    contains *content* (delegates to VerifyAttribute)."""
    self.VerifyAttribute("innerHTML", content, assertion="contain")
@CompatibleMethod
def VerifyVisible(self, trueORfalse):
    """Pass or fail the step by checking the element's visibility
    against the expected *trueORfalse*."""
    logger.step_normal("Element [%s]: Verify Visible = [%s]." % (self.__name__, trueORfalse))
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    logger.step_normal("Element [%s]: Count = [%s]" % (self.__name__, len(elements)))
    if elements[self.index].is_displayed():
        (logger.step_pass if trueORfalse is True else logger.step_fail)("Visible!")
    else:
        (logger.step_pass if trueORfalse is False else logger.step_fail)("Not Visible!")
@CompatibleMethod
def WaitForAppearing(self):
    """Block until at least one matching element appears."""
    logger.step_normal("Element [%s]: WaitForAppearing..." % self.__name__)
    self.__wait_for_appearing()
@CompatibleMethod
def WaitForAttribute(self, attribute, value, assertion="equal"):
    """Poll once per second (up to ~90 attempts) until the element's
    *attribute* satisfies *assertion* against *value*
    (case-insensitive comparison).

    Example:
        NewClaim.Dates.ReminderDate.WaitForAttribute('ng-model', 'hello', assertion='equal')
        NewClaim.Dates.ReminderDate.WaitForAttribute('ng-model', 'hello', assertion='contain')
        NewClaim.Dates.ReminderDate.WaitForAttribute('ng-model', 'hello', assertion='not contain')
        NewClaim.Dates.ReminderDate.WaitForAttribute('ng-model', 'hello', assertion='in')
        NewClaim.Dates.ReminderDate.WaitForAttribute('ng-model', 'hello', assertion='not equal')
    :param assertion:
        in          => Real value [in] 'hello'.          For example: real_value is 'he'
        equal       => Real value [equal] 'hello'.       For example: real_value is 'hello'
        contain     => Real value [contain] 'hello'.     For example: real_value is 'hello world'.
        not equal   => Real value [not equal] 'hello'.   For example: real_value is 'hallow'
        not contain => Real value [not contain] 'hello'. For example: real_value is 'hi world'.

    Bug fix: the 'not contain' branch had its Yes!/No! log messages
    inverted — it logged "Yes!" while still waiting and "No!" on
    success.  All branches now compute a satisfied flag and log
    consistently.
    """
    logger.step_normal("Element [%s]: WaitForAttribute [%s] <%s> [%s]." % (self.__name__, attribute, assertion, value))
    i = 0
    while True:
        self.__wait()
        elements = Browser.RunningBrowser.find_elements(self.by, self.value)
        real_value = elements[self.index].get_attribute(attribute)
        kind = assertion.lower()
        # shown is the value echoed in the log; substring assertions
        # truncate it to 150 chars as the original did.
        if kind == 'equal':
            satisfied = value.lower() == real_value.lower()
            shown = real_value
        elif kind == 'not equal':
            satisfied = value.lower() != real_value.lower()
            shown = real_value
        elif kind == 'contain':
            satisfied = value.lower() in real_value.lower()
            shown = real_value[:150]
        elif kind == 'not contain':
            satisfied = value.lower() not in real_value.lower()
            shown = real_value[:150]
        elif kind == 'in':
            satisfied = real_value.lower() in value.lower()
            shown = real_value[:150]
        else:
            logger.step_fail("code error.")
            satisfied = False
            shown = None
        if satisfied:
            logger.step_normal("Yes! real value=[%s]" % shown)
            break
        if shown is not None:
            logger.step_normal("No! real value=[%s]" % shown)
        i = i + 1
        if i > 90:
            logger.step_fail("Not Found Expected Value! real value=[%s]" % real_value)
            break
        time.sleep(1)
@CompatibleMethod
def WaitForDisappearing(self):
    """Block until no matching element remains."""
    logger.step_normal("Element [%s]: WaitForDisappearing..." % self.__name__)
    self.__wait_for_disappearing()
@CompatibleMethod
def WaitForEnabled(self):
    """Poll once per second, up to 90 attempts, until the element is
    enabled."""
    logger.step_normal("Element [%s]: WaitForEnabled..." % self.__name__)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    for _ in range(90):
        if elements[self.index].is_enabled():
            logger.step_normal("Element [%s]: is enabled now." % self.__name__)
            break
        logger.step_normal("Element [%s]: still NOT enabled, wait 1 second." % self.__name__)
        time.sleep(1)
@CompatibleMethod
def WaitForVisible(self):
    """Poll once per second, up to 90 attempts, until the element is
    displayed."""
    logger.step_normal("Element [%s]: WaitForVisible..." % self.__name__)
    self.__wait()
    elements = Browser.RunningBrowser.find_elements(self.by, self.value)
    for _ in range(90):
        if elements[self.index].is_displayed():
            logger.step_normal("Element [%s]: IS visible now." % self.__name__)
            break
        logger.step_normal("Element [%s]: Still NOT visible, wait 1 second." % self.__name__)
        time.sleep(1)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
HPELeftHand HTTP Client
.. module: http
:Author: Walter A. Boring IV
:Description: This is the HTTP Client that is used to make the actual calls.
It includes the authentication that knows the cookie name for LH.
"""
import logging
import requests
import time
try:
import json
except ImportError:
import simplejson as json
from hpelefthandclient import exceptions
class HTTPJSONRESTClient(object):
    """
    An HTTP REST Client that sends and receives JSON data as the body of the
    HTTP request.

    :param api_url: The url to the LH OS REST service
                    ie. https://<hostname or IP>:<port>/lhos
    :type api_url: str
    :param secure: Validate SSL cert? Default will not validate
    :type secure: bool
    :param http_log_debug: Turns on http log debugging. Default will not log
    :type http_log_debug: bool
    :param suppress_ssl_warnings: Suppresses log warning messages if True.
                                  Default will not suppress warnings.
    :type suppress_ssl_warnings: bool
    """

    USER_AGENT = 'python-hpelefthandclient'
    SESSION_COOKIE_NAME = 'Authorization'
    #API_VERSION = 'X-API-Version'
    #CHRP_VERSION = 'X_HPE-CHRP-Client-Version'

    # Class-level so enabling debug once applies to all client instances.
    http_log_debug = False
    _logger = logging.getLogger(__name__)

    def __init__(self, api_url, secure=False, http_log_debug=False,
                 suppress_ssl_warnings=False, timeout=None):
        if suppress_ssl_warnings:
            requests.packages.urllib3.disable_warnings()
        self.session_key = None
        # should be http://<Server:Port>/lhos
        self.set_url(api_url)
        self.set_debug_flag(http_log_debug)
        self.times = []  # [("item", starttime, endtime), ...]
        self.secure = secure
        self.timeout = timeout

    def set_url(self, api_url):
        """Store the base API url, stripping any trailing slash.

        Should be http://<Server:Port>/lhos
        """
        self.api_url = api_url.rstrip('/')

    def set_debug_flag(self, flag):
        """
        This turns on/off http request/response debugging output to console

        :param flag: Set to True to enable debugging output
        :type flag: bool
        """
        if not HTTPJSONRESTClient.http_log_debug and flag:
            ch = logging.StreamHandler()
            HTTPJSONRESTClient._logger.setLevel(logging.DEBUG)
            HTTPJSONRESTClient._logger.addHandler(ch)
            HTTPJSONRESTClient.http_log_debug = True

    def authenticate(self, user, password, optional=None):
        """
        This tries to create an authenticated session with the LH OS server

        :param user: The username
        :type user: str
        :param password: The password
        :type password: str
        """
        # This prevents a re-auth attempt if auth fails.
        self.auth_try = 1
        self.session_key = None

        info = {'user': user, 'password': password}
        self._auth_optional = None

        if optional:
            self._auth_optional = optional
            info.update(optional)

        resp, body = self.post('/credentials', body=info)
        if body and 'authToken' in body:
            self.session_key = body['authToken']

        self.auth_try = 0
        # Remembered so _reauth() can re-login after session expiry.
        self.user = user
        self.password = password
        return resp

    def _reauth(self):
        """Re-run authenticate() with the credentials saved at login."""
        self.authenticate(self.user, self.password, self._auth_optional)

    def unauthenticate(self):
        """
        This clears the authenticated session with the LH server. It logs out.
        """
        # delete the session on the LH
        self.delete('/credentials/%s' % self.session_key)
        self.session_key = None

    def get_timings(self):
        """
        This gives an array of the request timings since last reset_timings
        call.
        """
        return self.times

    def reset_timings(self):
        """
        This resets the request/response timings array
        """
        self.times = []

    def _http_log_req(self, args, kwargs):
        """Log the outgoing request as an equivalent curl command line."""
        if not self.http_log_debug:
            return

        string_parts = ['curl -i']
        for element in args:
            if element in ('GET', 'POST'):
                string_parts.append(' -X %s' % element)
            else:
                string_parts.append(' %s' % element)

        for element in kwargs['headers']:
            header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
            string_parts.append(header)

        HTTPJSONRESTClient._logger.debug("\nREQ: %s\n" % "".join(string_parts))
        if 'body' in kwargs:
            HTTPJSONRESTClient._logger.debug("REQ BODY: %s\n"
                                             % (kwargs['body']))

    def _http_log_resp(self, resp, body):
        """Log the response headers and body when debugging is enabled."""
        if not self.http_log_debug:
            return
        HTTPJSONRESTClient._logger.debug("RESP:%s\n",
                                         str(resp).replace('\',', '\'\n'))
        HTTPJSONRESTClient._logger.debug("RESP BODY:%s\n", body)

    def request(self, *args, **kwargs):
        """
        This makes an HTTP Request to the LH server. You should use get, post,
        delete instead.

        Raises the matching ``hpelefthandclient.exceptions`` error for
        transport failures, or an ``exceptions.from_response`` error for
        HTTP status codes >= 400.
        """
        if self.session_key and self.auth_try != 1:
            kwargs.setdefault('headers',
                              {})[self.SESSION_COOKIE_NAME] = self.session_key

        kwargs.setdefault('headers', {})
        kwargs['headers']['User-Agent'] = self.USER_AGENT
        kwargs['headers']['Accept'] = 'application/json'
        if 'body' in kwargs:
            kwargs['headers']['Content-Type'] = 'application/json'
            kwargs['body'] = json.dumps(kwargs['body'])
            payload = kwargs['body']
        else:
            payload = None

        # args[0] contains the URL, args[1] contains the HTTP verb/method
        http_url = args[0]
        http_method = args[1]

        self._http_log_req(args, kwargs)
        try:
            if self.timeout:
                r = requests.request(http_method, http_url, data=payload,
                                     headers=kwargs['headers'],
                                     verify=self.secure,
                                     timeout=self.timeout)
            else:
                r = requests.request(http_method, http_url, data=payload,
                                     headers=kwargs['headers'],
                                     verify=self.secure)
        except requests.exceptions.SSLError as err:
            HTTPJSONRESTClient._logger.error("SSL certificate verification"
                                             " failed: (%s). You must have a"
                                             " valid SSL certificate or"
                                             " disable SSL verification.", err)
            raise exceptions.SSLCertFailed("SSL Certificate Verification"
                                           " Failed")
        # Bug fix: ConnectionError, HTTPError, URLRequired, TooManyRedirects
        # and Timeout are all subclasses of RequestException, so the generic
        # RequestException handler must come LAST; the original listed it
        # first, which made the specific handlers unreachable.
        except requests.exceptions.ConnectionError as err:
            raise exceptions.ConnectionError("Connection Error: %s" % err)
        except requests.exceptions.HTTPError as err:
            raise exceptions.HTTPError("HTTP Error: %s" % err)
        except requests.exceptions.URLRequired as err:
            raise exceptions.URLRequired("URL Required: %s" % err)
        except requests.exceptions.TooManyRedirects as err:
            raise exceptions.TooManyRedirects("Too Many Redirects: %s" % err)
        except requests.exceptions.Timeout as err:
            raise exceptions.Timeout("Timeout: %s" % err)
        except requests.exceptions.RequestException as err:
            raise exceptions.RequestException("Request Exception: %s" % err)

        resp = r.headers
        body = r.text
        if isinstance(body, bytes):
            body = body.decode('utf-8')

        # resp['status'], status['content-location'], and resp.status need to
        # be manually set as Python Requests doesn't provide them
        # automatically.
        resp['status'] = str(r.status_code)
        resp.status = r.status_code
        if 'location' not in resp:
            resp['content-location'] = r.url

        r.close()
        self._http_log_resp(resp, body)

        # Try and convert the body response to an object.
        # This assumes the body of the reply is JSON.
        if body:
            try:
                body = json.loads(body)
            except ValueError:
                # Leave non-JSON bodies as raw text.
                pass
        else:
            body = None

        if resp.status >= 400:
            if body and 'message' in body:
                body['desc'] = body['message']
            raise exceptions.from_response(resp, body)

        return resp, body

    def _time_request(self, url, method, **kwargs):
        """Run request() and record (label, start, end) into self.times."""
        start_time = time.time()
        resp, body = self.request(url, method, **kwargs)
        self.times.append(("%s %s" % (method, url),
                           start_time, time.time()))
        return resp, body

    def _do_reauth(self, url, method, ex, **kwargs):
        """Re-authenticate once and retry; re-raise *ex* if that fails."""
        print("_do_reauth called")
        try:
            if self.auth_try != 1:
                self._reauth()
                resp, body = self._time_request(self.api_url + url,
                                                method, **kwargs)
                return resp, body
            else:
                raise ex
        except exceptions.HTTPUnauthorized:
            raise ex

    def _cs_request(self, url, method, **kwargs):
        # Perform the request once. If we get a 401 back then it
        # might be because the auth token expired, so try to
        # re-authenticate and try again. If it still fails, bail.
        try:
            resp, body = self._time_request(self.api_url + url, method,
                                            **kwargs)
            return resp, body
        except exceptions.HTTPUnauthorized as ex:
            resp, body = self._do_reauth(url, method, ex, **kwargs)
            return resp, body
        except exceptions.HTTPForbidden as ex:
            resp, body = self._do_reauth(url, method, ex, **kwargs)
            return resp, body

    def get(self, url, **kwargs):
        """
        Make an HTTP GET request to the server.

        .. code-block:: python

            #example call
            try {
                headers, body = http.get('/volumes')
            } except exceptions.HTTPUnauthorized as ex:
                print "Not logged in"
            }

        :param url: The relative url from the LH api_url
        :type url: str

        :returns: headers - dict of HTTP Response headers
        :returns: body - the body of the response.  If the body was JSON,
                  it will be an object
        """
        return self._cs_request(url, 'GET', **kwargs)

    def post(self, url, **kwargs):
        """
        Make an HTTP POST request to the server.

        .. code-block:: python

            #example call
            try {
                info = {'name': 'new volume name', 'sizeMiB': 300}
                headers, body = http.post('/volumes', body=info)
            } except exceptions.HTTPUnauthorized as ex:
                print "Not logged in"
            }

        :param url: The relative url from the LH api_url
        :type url: str

        :returns: headers - dict of HTTP Response headers
        :returns: body - the body of the response.  If the body was JSON,
                  it will be an object
        """
        return self._cs_request(url, 'POST', **kwargs)

    def put(self, url, **kwargs):
        """
        Make an HTTP PUT request to the server.

        .. code-block:: python

            #example call
            try {
                info = {'name': 'something'}
                headers, body = http.put('/volumes', body=info)
            } except exceptions.HTTPUnauthorized as ex:
                print "Not logged in"
            }

        :param url: The relative url from the LH api_url
        :type url: str

        :returns: headers - dict of HTTP Response headers
        :returns: body - the body of the response.  If the body was JSON,
                  it will be an object
        """
        return self._cs_request(url, 'PUT', **kwargs)

    def delete(self, url, **kwargs):
        """
        Make an HTTP DELETE request to the server.

        .. code-block:: python

            #example call
            try {
                headers, body = http.delete('/volumes/%s' % name)
            } except exceptions.HTTPUnauthorized as ex:
                print "Not logged in"
            }

        :param url: The relative url from the LH api_url
        :type url: str

        :returns: headers - dict of HTTP Response headers
        :returns: body - the body of the response.  If the body was JSON,
                  it will be an object
        """
        return self._cs_request(url, 'DELETE', **kwargs)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``blogs`` app.

    Swaps the generic ``Blog.block_other_4`` / ``Blog.block_other_5`` text
    blocks for two purpose-named ones: ``block_subscribe_text`` and
    ``block_subscribe_button``.
    """

    def forwards(self, orm):
        """Apply: drop the two generic columns, add the subscribe columns."""
        # Deleting field 'Blog.block_other_4'
        db.delete_column('blogs_blog', 'block_other_4')

        # Deleting field 'Blog.block_other_5'
        db.delete_column('blogs_blog', 'block_other_5')

        # Adding field 'Blog.block_subscribe_text'
        db.add_column('blogs_blog', 'block_subscribe_text',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
                      keep_default=False)

        # Adding field 'Blog.block_subscribe_button'
        db.add_column('blogs_blog', 'block_subscribe_button',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: restore the generic columns, drop the subscribe columns.

        NOTE(review): any data stored in the dropped columns is lost on
        rollback — South restores the columns but not their contents.
        """
        # Adding field 'Blog.block_other_4'
        db.add_column('blogs_blog', 'block_other_4',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
                      keep_default=False)

        # Adding field 'Blog.block_other_5'
        db.add_column('blogs_blog', 'block_other_5',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
                      keep_default=False)

        # Deleting field 'Blog.block_subscribe_text'
        db.delete_column('blogs_blog', 'block_subscribe_text')

        # Deleting field 'Blog.block_subscribe_button'
        db.delete_column('blogs_blog', 'block_subscribe_button')

    # Frozen ORM snapshot: auto-generated by South so this migration can
    # build fake model classes matching the schema at this point in
    # history.  Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blogs.blog': {
            'Meta': {'object_name': 'Blog'},
            'block_css': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_footer': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_header': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_middle': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_navbar': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_bottom': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_middle_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_middle_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_top': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_subscribe_button': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_subscribe_text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_title': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'has_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.category': {
            'Meta': {'object_name': 'Category'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
            'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.comment': {
            'Meta': {'object_name': 'Comment'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.info_email': {
            'Meta': {'object_name': 'Info_email'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
        },
        'blogs.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'blogs.page': {
            'Meta': {'object_name': 'Page'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
        },
        'blogs.post': {
            'Meta': {'object_name': 'Post'},
            'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'blogs.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
        },
        'blogs.translation': {
            'Meta': {'object_name': 'Translation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # South only inspects models from these apps for this migration.
    complete_apps = ['blogs']
| |
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union
from prometheus_client import Counter
from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
event_processing_loop_counter,
event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
# Per-module logger.
logger = logging.getLogger(__name__)

# Prometheus counter: total events pushed to application services.
events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")
class ApplicationServicesHandler:
def __init__(self, hs: "HomeServer"):
    """Wire up the handler from the homeserver's registries and config.

    Args:
        hs: The running homeserver; supplies storage, the appservice
            API/scheduler, clock, config flags and event sources.
    """
    self.store = hs.get_datastores().main
    self.is_mine_id = hs.is_mine_id
    self.appservice_api = hs.get_application_service_api()
    self.scheduler = hs.get_application_service_scheduler()
    # Flipped to True the first time an event push starts the scheduler.
    self.started_scheduler = False
    self.clock = hs.get_clock()
    # Global switch: when False, no appservice is ever notified.
    self.notify_appservices = hs.config.appservice.notify_appservices
    self.event_sources = hs.get_event_sources()
    # Experimental MSC2409: forwarding to-device messages to appservices.
    self._msc2409_to_device_messages_enabled = (
        hs.config.experimental.msc2409_to_device_messages_enabled
    )
    # Highest stream ordering seen so far, and a guard preventing two
    # concurrent runs of the background event-processing loop.
    self.current_max = 0
    self.is_processing = False
    # Serialises read/update of per-(appservice, stream) positions.
    self._ephemeral_events_linearizer = Linearizer(
        name="appservice_ephemeral_events"
    )
def notify_interested_services(self, max_token: "RoomStreamToken") -> None:
    """Push a new maximum stream position to interested appservices.

    Delivery itself happens on a background process, so this call
    returns quickly and never blocks for any prolonged length of time.
    """
    # Only the minimum stream ordering matters; the vector-clock part of
    # the token is deliberately ignored (safe as long as it is ignored
    # consistently everywhere).
    stream_ordering = max_token.stream

    registered_services = self.store.get_app_services()
    if not (registered_services and self.notify_appservices):
        return

    if stream_ordering > self.current_max:
        self.current_max = stream_ordering

    # A loop is already draining events up to current_max; don't spawn a
    # second one (background processes are only started when needed).
    if self.is_processing:
        return

    self._notify_interested_services(max_token)
@wrap_as_background_process("notify_interested_services")
async def _notify_interested_services(self, max_token: RoomStreamToken) -> None:
    """Background loop: drain new events up to ``self.current_max`` and
    push them to every interested application service.

    Loops until the persisted appservice position catches up with the
    highest stream ordering seen; ``self.is_processing`` guards against
    concurrent instances of this loop.
    """
    with Measure(self.clock, "notify_interested_services"):
        self.is_processing = True
        try:
            # Fetch events in batches of up to `limit` per iteration.
            limit = 100
            upper_bound = -1
            while upper_bound < self.current_max:
                (
                    upper_bound,
                    events,
                ) = await self.store.get_new_events_for_appservice(
                    self.current_max, limit
                )

                # Group events by room so rooms can be processed
                # concurrently while each room's events stay in order.
                events_by_room: Dict[str, List[EventBase]] = {}
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                async def handle_event(event: EventBase) -> None:
                    # Gather interested services
                    services = await self._get_services_for_event(event)
                    if len(services) == 0:
                        return  # no services need notifying

                    # Do we know this user exists? If not, poke the user
                    # query API for all services which match that user regex.
                    # This needs to block as these user queries need to be
                    # made BEFORE pushing the event.
                    await self._check_user_exists(event.sender)
                    if event.type == EventTypes.Member:
                        await self._check_user_exists(event.state_key)

                    if not self.started_scheduler:

                        async def start_scheduler() -> None:
                            try:
                                await self.scheduler.start()
                            except Exception:
                                logger.error("Application Services Failure")

                        run_as_background_process("as_scheduler", start_scheduler)
                        self.started_scheduler = True

                    # Fork off pushes to these services
                    for service in services:
                        self.scheduler.enqueue_for_appservice(
                            service, events=[event]
                        )

                    # Export per-event processing lag for monitoring.
                    now = self.clock.time_msec()
                    ts = await self.store.get_received_ts(event.event_id)
                    assert ts is not None

                    synapse.metrics.event_processing_lag_by_event.labels(
                        "appservice_sender"
                    ).observe((now - ts) / 1000)

                async def handle_room_events(events: Iterable[EventBase]) -> None:
                    for event in events:
                        await handle_event(event)

                # Fan out one background task per room and wait for all.
                await make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(handle_room_events, evs)
                            for evs in events_by_room.values()
                        ],
                        consumeErrors=True,
                    )
                )

                # Record progress so a restart does not replay this batch.
                await self.store.set_appservice_last_pos(upper_bound)

                synapse.metrics.event_processing_positions.labels(
                    "appservice_sender"
                ).set(upper_bound)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels("appservice_sender").inc(
                    len(events_by_room)
                )

                event_processing_loop_counter.labels("appservice_sender").inc()

                if events:
                    # Lag/position metrics track the last event in the batch.
                    now = self.clock.time_msec()
                    ts = await self.store.get_received_ts(events[-1].event_id)
                    assert ts is not None

                    synapse.metrics.event_processing_lag.labels(
                        "appservice_sender"
                    ).set(now - ts)
                    synapse.metrics.event_processing_last_ts.labels(
                        "appservice_sender"
                    ).set(ts)
        finally:
            # Always clear the guard so future pushes can restart the loop.
            self.is_processing = False
def notify_interested_services_ephemeral(
    self,
    stream_key: str,
    new_token: "Union[int, RoomStreamToken]",
    users: "Collection[Union[str, UserID]]",
) -> None:
    """Entry point called by the notifier when an ephemeral event is
    handled by the homeserver.

    Works out which appservices might care about the update and hands
    delivery off to a background process.

    Args:
        stream_key: The stream the event came from; one of
            "typing_key", "receipt_key", "presence_key" or
            "to_device_key" — any other value returns early.  Only
            appservices that opted in via ``push_ephemeral`` (while
            MSC2409 is experimental: ``de.sorunome.msc2409.push_ephemeral``)
            in their registration file receive these events, and only
            within their registered user and room namespaces.
        new_token: The stream token of the event.
        users: The users that should be informed of the new event, if any.
    """
    if not self.notify_appservices:
        return

    # Only a fixed set of ephemeral streams is forwarded to appservices.
    # FIXME: We should use constants for these values.
    supported_streams = (
        "typing_key",
        "receipt_key",
        "presence_key",
        "to_device_key",
    )
    if stream_key not in supported_streams:
        return

    # Every supported stream above has a single writer and tracks
    # progress with a plain integer rather than a RoomStreamToken (a
    # vector-clock implementation), so insist on an int here.  Should a
    # RoomStreamToken ever need to be passed down,
    # RoomStreamToken.stream (the minimum stream position) would be the
    # ascending integer to use.
    # Additional context: https://github.com/matrix-org/synapse/pull/11137
    assert isinstance(new_token, int)

    # To-device forwarding (MSC2409) is behind a feature flag.
    if stream_key == "to_device_key" and not self._msc2409_to_device_messages_enabled:
        return

    # Of all registered appservices, keep only those that explicitly
    # opted in to ephemeral events; whether a particular event is
    # actually relevant to them is decided later on.
    interested = [
        appservice
        for appservice in self.store.get_app_services()
        if appservice.supports_ephemeral
    ]
    if not interested:
        return

    # Only kick off a background process when there is real work to do.
    self._notify_interested_services_ephemeral(
        interested, stream_key, new_token, users
    )
@wrap_as_background_process("notify_interested_services_ephemeral")
async def _notify_interested_services_ephemeral(
    self,
    services: List[ApplicationService],
    stream_key: str,
    new_token: int,
    users: Collection[Union[str, UserID]],
) -> None:
    """Background worker for ``notify_interested_services_ephemeral``.

    For each candidate appservice, fetches the relevant ephemeral events
    for ``stream_key``, enqueues them with the scheduler, and (for every
    stream except typing) persists the per-appservice stream position.

    Args:
        services: Appservices that opted in to ephemeral events.
        stream_key: Which ephemeral stream ``new_token`` belongs to.
        new_token: The stream position that triggered this notification.
        users: Users the update concerns (used by the presence branch).
    """
    logger.debug("Checking interested services for %s", stream_key)
    with Measure(self.clock, "notify_interested_services_ephemeral"):
        for service in services:
            if stream_key == "typing_key":
                # Note that we don't persist the token (via set_appservice_stream_type_pos)
                # for typing_key due to performance reasons and due to their highly
                # ephemeral nature.
                #
                # Instead we simply grab the latest typing updates in _handle_typing
                # and, if they apply to this application service, send it off.
                events = await self._handle_typing(service, new_token)
                if events:
                    self.scheduler.enqueue_for_appservice(service, ephemeral=events)
                continue

            # Since we read/update the stream position for this AS/stream
            with (
                await self._ephemeral_events_linearizer.queue(
                    (service.id, stream_key)
                )
            ):
                if stream_key == "receipt_key":
                    events = await self._handle_receipts(service, new_token)
                    self.scheduler.enqueue_for_appservice(service, ephemeral=events)

                    # Persist the latest handled stream token for this appservice
                    await self.store.set_appservice_stream_type_pos(
                        service, "read_receipt", new_token
                    )

                elif stream_key == "presence_key":
                    events = await self._handle_presence(service, users, new_token)
                    self.scheduler.enqueue_for_appservice(service, ephemeral=events)

                    # Persist the latest handled stream token for this appservice
                    await self.store.set_appservice_stream_type_pos(
                        service, "presence", new_token
                    )

                elif stream_key == "to_device_key":
                    # Retrieve a list of to-device message events, as well as the
                    # maximum stream token of the messages we were able to retrieve.
                    to_device_messages = await self._get_to_device_messages(
                        service, new_token, users
                    )
                    self.scheduler.enqueue_for_appservice(
                        service, to_device_messages=to_device_messages
                    )

                    # Persist the latest handled stream token for this appservice
                    await self.store.set_appservice_stream_type_pos(
                        service, "to_device", new_token
                    )
async def _handle_typing(
self, service: ApplicationService, new_token: int
) -> List[JsonDict]:
"""
Return the typing events since the given stream token that the given application
service should receive.
First fetch all typing events between the given typing stream token (non-inclusive)
and the latest typing event stream token (inclusive). Then return only those typing
events that the given application service may be interested in.
Args:
service: The application service to check for which events it should receive.
new_token: A typing event stream token.
Returns:
A list of JSON dictionaries containing data derived from the typing events that
should be sent to the given application service.
"""
typing_source = self.event_sources.sources.typing
# Get the typing events from just before current
typing, _ = await typing_source.get_new_events_as(
service=service,
# For performance reasons, we don't persist the previous
# token in the DB and instead fetch the latest typing event
# for appservices.
# TODO: It'd likely be more efficient to simply fetch the
# typing event with the given 'new_token' stream token and
# check if the given service was interested, rather than
# iterating over all typing events and only grabbing the
# latest few.
from_key=new_token - 1,
)
return typing
async def _handle_receipts(
self, service: ApplicationService, new_token: Optional[int]
) -> List[JsonDict]:
"""
Return the latest read receipts that the given application service should receive.
First fetch all read receipts between the last receipt stream token that this
application service should have previously received (non-inclusive) and the
latest read receipt stream token (inclusive). Then from that set, return only
those read receipts that the given application service may be interested in.
Args:
service: The application service to check for which events it should receive.
new_token: A receipts event stream token. Purely used to double-check that the
from_token we pull from the database isn't greater than or equal to this
token. Prevents accidentally duplicating work.
Returns:
A list of JSON dictionaries containing data derived from the read receipts that
should be sent to the given application service.
"""
from_key = await self.store.get_type_stream_id_for_appservice(
service, "read_receipt"
)
if new_token is not None and new_token <= from_key:
logger.debug(
"Rejecting token lower than or equal to stored: %s" % (new_token,)
)
return []
receipts_source = self.event_sources.sources.receipt
receipts, _ = await receipts_source.get_new_events_as(
service=service, from_key=from_key
)
return receipts
async def _handle_presence(
self,
service: ApplicationService,
users: Collection[Union[str, UserID]],
new_token: Optional[int],
) -> List[JsonDict]:
"""
Return the latest presence updates that the given application service should receive.
First, filter the given users list to those that the application service is
interested in. Then retrieve the latest presence updates since the
the last-known previously received presence stream token for the given
application service. Return those presence updates.
Args:
service: The application service that ephemeral events are being sent to.
users: The users that should receive the presence update.
new_token: A presence update stream token. Purely used to double-check that the
from_token we pull from the database isn't greater than or equal to this
token. Prevents accidentally duplicating work.
Returns:
A list of json dictionaries containing data derived from the presence events
that should be sent to the given application service.
"""
events: List[JsonDict] = []
presence_source = self.event_sources.sources.presence
from_key = await self.store.get_type_stream_id_for_appservice(
service, "presence"
)
if new_token is not None and new_token <= from_key:
logger.debug(
"Rejecting token lower than or equal to stored: %s" % (new_token,)
)
return []
for user in users:
if isinstance(user, str):
user = UserID.from_string(user)
interested = await service.is_interested_in_presence(user, self.store)
if not interested:
continue
presence_events, _ = await presence_source.get_new_events(
user=user,
from_key=from_key,
)
time_now = self.clock.time_msec()
events.extend(
{
"type": "m.presence",
"sender": event.user_id,
"content": format_user_presence_state(
event, time_now, include_user_id=False
),
}
for event in presence_events
)
return events
async def _get_to_device_messages(
self,
service: ApplicationService,
new_token: int,
users: Collection[Union[str, UserID]],
) -> List[JsonDict]:
"""
Given an application service, determine which events it should receive
from those between the last-recorded to-device message stream token for this
appservice and the given stream token.
Args:
service: The application service to check for which events it should receive.
new_token: The latest to-device event stream token.
users: The users to be notified for the new to-device messages
(ie, the recipients of the messages).
Returns:
A list of JSON dictionaries containing data derived from the to-device events
that should be sent to the given application service.
"""
# Get the stream token that this application service has processed up until
from_key = await self.store.get_type_stream_id_for_appservice(
service, "to_device"
)
# Filter out users that this appservice is not interested in
users_appservice_is_interested_in: List[str] = []
for user in users:
# FIXME: We should do this farther up the call stack. We currently repeat
# this operation in _handle_presence.
if isinstance(user, UserID):
user = user.to_string()
if service.is_interested_in_user(user):
users_appservice_is_interested_in.append(user)
if not users_appservice_is_interested_in:
# Return early if the AS was not interested in any of these users
return []
# Retrieve the to-device messages for each user
recipient_device_to_messages = await self.store.get_messages_for_user_devices(
users_appservice_is_interested_in,
from_key,
new_token,
)
# According to MSC2409, we'll need to add 'to_user_id' and 'to_device_id' fields
# to the event JSON so that the application service will know which user/device
# combination this messages was intended for.
#
# So we mangle this dict into a flat list of to-device messages with the relevant
# user ID and device ID embedded inside each message dict.
message_payload: List[JsonDict] = []
for (
user_id,
device_id,
), messages in recipient_device_to_messages.items():
for message_json in messages:
# Remove 'message_id' from the to-device message, as it's an internal ID
message_json.pop("message_id", None)
message_payload.append(
{
"to_user_id": user_id,
"to_device_id": device_id,
**message_json,
}
)
return message_payload
async def query_user_exists(self, user_id: str) -> bool:
"""Check if any application service knows this user_id exists.
Args:
user_id: The user to query if they exist on any AS.
Returns:
True if this user exists on at least one application service.
"""
user_query_services = self._get_services_for_user(user_id=user_id)
for user_service in user_query_services:
is_known_user = await self.appservice_api.query_user(user_service, user_id)
if is_known_user:
return True
return False
async def query_room_alias_exists(
self, room_alias: RoomAlias
) -> Optional[RoomAliasMapping]:
"""Check if an application service knows this room alias exists.
Args:
room_alias: The room alias to query.
Returns:
RoomAliasMapping or None if no association can be found.
"""
room_alias_str = room_alias.to_string()
services = self.store.get_app_services()
alias_query_services = [
s for s in services if (s.is_room_alias_in_namespace(room_alias_str))
]
for alias_service in alias_query_services:
is_known_alias = await self.appservice_api.query_alias(
alias_service, room_alias_str
)
if is_known_alias:
# the alias exists now so don't query more ASes.
return await self.store.get_association_from_room_alias(room_alias)
return None
    async def query_3pe(
        self, kind: str, protocol: str, fields: Dict[bytes, List[bytes]]
    ) -> List[JsonDict]:
        """Query third-party users/locations for `protocol` across all interested ASes.
        Args:
            kind: The lookup kind forwarded verbatim to each AS.
            protocol: The third-party protocol identifier being queried.
            fields: Raw query fields (bytes keys/values) passed through to the AS API.
        Returns:
            The concatenated results from every AS that answered successfully.
        """
        services = self._get_services_for_3pn(protocol)
        # Fire the lookups at all interested ASes concurrently; consumeErrors
        # stops one failing AS from raising an unhandled-Deferred error.
        results = await make_deferred_yieldable(
            defer.DeferredList(
                [
                    run_in_background(
                        self.appservice_api.query_3pe, service, kind, protocol, fields
                    )
                    for service in services
                ],
                consumeErrors=True,
            )
        )
        ret = []
        # DeferredList yields (success, result) pairs; failed lookups are
        # silently dropped -- this is deliberately best-effort aggregation.
        for (success, result) in results:
            if success:
                ret.extend(result)
        return ret
async def get_3pe_protocols(
self, only_protocol: Optional[str] = None
) -> Dict[str, JsonDict]:
services = self.store.get_app_services()
protocols: Dict[str, List[JsonDict]] = {}
# Collect up all the individual protocol responses out of the ASes
for s in services:
for p in s.protocols:
if only_protocol is not None and p != only_protocol:
continue
if p not in protocols:
protocols[p] = []
info = await self.appservice_api.get_3pe_protocol(s, p)
if info is not None:
protocols[p].append(info)
def _merge_instances(infos: List[JsonDict]) -> JsonDict:
# Merge the 'instances' lists of multiple results, but just take
# the other fields from the first as they ought to be identical
# copy the result so as not to corrupt the cached one
combined = dict(infos[0])
combined["instances"] = list(combined["instances"])
for info in infos[1:]:
combined["instances"].extend(info["instances"])
return combined
return {
p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]
}
async def _get_services_for_event(
self, event: EventBase
) -> List[ApplicationService]:
"""Retrieve a list of application services interested in this event.
Args:
event: The event to check.
Returns:
A list of services interested in this event based on the service regex.
"""
services = self.store.get_app_services()
# we can't use a list comprehension here. Since python 3, list
# comprehensions use a generator internally. This means you can't yield
# inside of a list comprehension anymore.
interested_list = []
for s in services:
if await s.is_interested_in_event(event.event_id, event, self.store):
interested_list.append(s)
return interested_list
def _get_services_for_user(self, user_id: str) -> List[ApplicationService]:
services = self.store.get_app_services()
return [s for s in services if (s.is_interested_in_user(user_id))]
def _get_services_for_3pn(self, protocol: str) -> List[ApplicationService]:
services = self.store.get_app_services()
return [s for s in services if s.is_interested_in_protocol(protocol)]
async def _is_unknown_user(self, user_id: str) -> bool:
if not self.is_mine_id(user_id):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
return False
user_info = await self.store.get_user_by_id(user_id)
if user_info:
return False
# user not found; could be the AS though, so check.
services = self.store.get_app_services()
service_list = [s for s in services if s.sender == user_id]
return len(service_list) == 0
async def _check_user_exists(self, user_id: str) -> bool:
unknown_user = await self._is_unknown_user(user_id)
if unknown_user:
return await self.query_user_exists(user_id)
return True
| |
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Utilities for serializing and deserializing XLA code."""
from typing import List, Optional
from jax.lib import xla_client
import numpy as np
from google.protobuf import any_pb2
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
_HLO_MODULE_PROTO_URI = 'type.googleapis.com/xla.HloModuleProto'
def pack_xla_computation(xla_computation):
  """Pack a `XlaComputation` into `Any` proto with a HLO module proto payload.

  Args:
    xla_computation: An instance of `xla_client.XlaComputation` to pack.

  Returns:
    A `google.protobuf.Any` protocol buffer message containing this
    computation's `HloModuleProto` in a binary-serialized form.

  Raises:
    TypeError: if `xla_computation` is not an `xla_client.XlaComputation`.
  """
  py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
  serialized_hlo = xla_computation.as_serialized_hlo_module_proto()
  return any_pb2.Any(type_url=_HLO_MODULE_PROTO_URI, value=serialized_hlo)
def unpack_xla_computation(any_pb):
  """Unpacks an `Any` proto to an `XlaComputation`.

  Args:
    any_pb: An instance of `google.protobuf.Any` to unpack.

  Returns:
    The unpacked instance of `xla_client.XlaComputation`.

  Raises:
    TypeError: if `any_pb` is not an `Any` protocol buffer message.
    ValueError: if the object packed into `any_pb` cannot be unpacked.
  """
  py_typecheck.check_type(any_pb, any_pb2.Any)
  type_url = any_pb.type_url
  if type_url != _HLO_MODULE_PROTO_URI:
    raise ValueError('Not a serialized `HloModuleProto`: {}.'.format(
        str(type_url)))
  return xla_client.XlaComputation(any_pb.value)
def _make_xla_binding_for_type(
    tensor_indexes: List[int],
    type_spec: Optional[computation_types.Type]) -> Optional[pb.Xla.Binding]:
  """Generates an XLA binding for TFF type `type_spec`.

  In the generated binding, tensors are assigned indexes in consecutive order
  of DFS traversal.

  Args:
    tensor_indexes: The list of tensor indexes to use in the binding, in the
      order matching the order of flattened `type_spec`.
    type_spec: The type to generate the binding for. Must be either an instance
      of `computation_types.Type`, or `None`.

  Returns:
    The generated binding (either `pb.Xla.Binding` or `None`).

  Raises:
    NotImplementedError: if `type_spec` contains a type other than tensors
      and structs.
  """
  if type_spec is None:
    return None
  py_typecheck.check_type(type_spec, computation_types.Type)
  py_typecheck.check_type(tensor_indexes, list)
  def _make_starting_at_index(type_spec, idx):
    # Recursive helper: builds a binding for `type_spec`, consuming positions
    # of `tensor_indexes` starting at `idx`. Returns (binding, next_idx) so
    # siblings continue from where this subtree left off.
    if isinstance(type_spec, computation_types.TensorType):
      return pb.Xla.Binding(
          tensor=pb.Xla.TensorBinding(index=tensor_indexes[idx])), idx + 1
    if isinstance(type_spec, computation_types.StructType):
      elements = []
      for _, v in structure.iter_elements(type_spec):
        # Depth-first traversal: each child advances the running index.
        binding, idx = _make_starting_at_index(v, idx)
        elements.append(binding)
      return pb.Xla.Binding(struct=pb.Xla.StructBinding(element=elements)), idx
    raise NotImplementedError('XLA bindings for {} are unsupported'.format(
        str(type_spec)))
  binding, _ = _make_starting_at_index(type_spec, 0)
  return binding
def _remove_struct_element_names_from_tff_type(type_spec):
  """Removes names of struct elements from `type_spec`.

  Args:
    type_spec: An instance of `computation_types.Type` that must be a tensor, a
      (possibly) nested structure of tensors, or a function.

  Returns:
    A modified version of `type_spec` with element names in structures removed.

  Raises:
    TypeError: if arg is of the wrong type.
  """
  if type_spec is None:
    return None
  if isinstance(type_spec, computation_types.TensorType):
    # Tensors carry no element names; nothing to strip.
    return type_spec
  if isinstance(type_spec, computation_types.FunctionType):
    # Recurse into both the parameter and the result of the function type.
    return computation_types.FunctionType(
        _remove_struct_element_names_from_tff_type(type_spec.parameter),
        _remove_struct_element_names_from_tff_type(type_spec.result))
  py_typecheck.check_type(type_spec, computation_types.StructType)
  unnamed_elements = [
      (None, _remove_struct_element_names_from_tff_type(element))
      for _, element in structure.iter_elements(type_spec)
  ]
  return computation_types.StructType(unnamed_elements)
def create_xla_tff_computation(xla_computation, tensor_indexes, type_spec):
  """Creates an XLA TFF computation.

  Args:
    xla_computation: An instance of `xla_client.XlaComputation`.
    tensor_indexes: The list of tensor indexes to use in the parameter binding,
      in the order matching the order of flattened parameter in `type_spec`.
    type_spec: The TFF type of the computation to be constructed.

  Returns:
    An instance of `pb.Computation`.

  Raises:
    ValueError: if the arguments are invalid or incompatible with each other,
      e.g., because the TFF types mismatch.
  """
  py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
  py_typecheck.check_type(tensor_indexes, list)
  py_typecheck.check_type(type_spec, computation_types.FunctionType)
  parameter_binding = _make_xla_binding_for_type(tensor_indexes,
                                                 type_spec.parameter)
  # The result binding always uses consecutive indexes 0..n-1 over the
  # flattened result type.
  result_binding = _make_xla_binding_for_type(
      list(range(len(structure.flatten(type_spec.result)))), type_spec.result)
  # Round-trip check: re-derive the TFF type from the XLA program shape plus
  # the bindings we just built, and compare it against the declared type
  # (struct element names are stripped first, as XLA cannot represent them).
  reconstructed_type = xla_computation_and_bindings_to_tff_type(
      xla_computation, parameter_binding, result_binding)
  py_typecheck.check_type(reconstructed_type, computation_types.FunctionType)
  expected_type = _remove_struct_element_names_from_tff_type(type_spec)
  if not reconstructed_type.is_equivalent_to(expected_type):
    raise ValueError(
        'The TFF type of the XLA computation {} does not match the expected '
        'TFF type {}.'.format(str(reconstructed_type), str(expected_type)))
  return pb.Computation(
      type=type_serialization.serialize_type(type_spec),
      xla=pb.Xla(
          hlo_module=pack_xla_computation(xla_computation),
          parameter=parameter_binding,
          result=result_binding))
def xla_computation_and_bindings_to_tff_type(xla_computation, parameter_binding,
                                             result_binding):
  """Constructs the TFF type from an `xla_client.XlaComputation` and bindings.

  NOTE: This is a helper function, primarily intended for use in checking the
  well-formedness of TFF computations during serialization and deserialization,
  and for serialization testing/debugging purposes.

  Args:
    xla_computation: An instance of `xla_client.XlaComputation` to get type for.
    parameter_binding: An instance of `pb.Xla.Binding` for the parameter.
    result_binding: An instance of `pb.Xla.Binding` for the result.

  Returns:
    An instance of `computation_types.Type`.
  """
  py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
  program_shape = xla_computation.program_shape()
  parameter_type = xla_shapes_and_binding_to_tff_type(
      program_shape.parameter_shapes(), parameter_binding)
  # The program result is a single shape; wrap it in a list for the helper.
  result_type = xla_shapes_and_binding_to_tff_type(
      [program_shape.result_shape()], result_binding)
  return computation_types.FunctionType(parameter_type, result_type)
def xla_shapes_and_binding_to_tff_type(xla_shapes, binding):
  """Constructs the TFF type from a list of `xla_client.Shape` and a binding.

  Args:
    xla_shapes: A list of `xla_client.Shape` instances.
    binding: An instance of `pb.Xla.Binding` (or `None` if there's none).

  Returns:
    An instance of `computation_types.Type` (or `None`).

  Raises:
    ValueError: if the binding is malformed: an out-of-range index, a
      duplicated index, an unrecognized binding kind, or tensors left
      uncaptured by the binding.
  """
  py_typecheck.check_type(xla_shapes, list)
  if binding is not None:
    py_typecheck.check_type(binding, pb.Xla.Binding)
  # Flatten all shapes into a single list of leaf tensor shapes; the binding's
  # tensor indexes refer to positions in this flattened list.
  tensor_shapes = []
  for shape in xla_shapes:
    tensor_shapes += flatten_xla_shape(shape)
  # Tracks which tensors the binding has not yet consumed, so duplicates and
  # leftovers can be detected. Mutated by the closure below.
  unused_shape_indexes = set(range(len(tensor_shapes)))
  def _get_type(binding):
    # Recursively translate a binding node into the corresponding TFF type.
    if binding is None:
      return None
    kind = binding.WhichOneof('binding')
    if kind == 'tensor':
      index = binding.tensor.index
      if (index < 0) or (index >= len(tensor_shapes)):
        raise ValueError(
            'Binding refers to an inexistent index {}.'.format(index))
      if index not in unused_shape_indexes:
        raise ValueError(
            'Duplicate bindings referring to index {}.'.format(index))
      unused_shape_indexes.remove(index)
      shape = tensor_shapes[index]
      return computation_types.TensorType(shape.numpy_dtype(),
                                          shape.dimensions())
    if kind == 'struct':
      return computation_types.StructType([
          (None, _get_type(x)) for x in binding.struct.element
      ])
    if kind is None:
      # An empty binding node maps to no type.
      return None
    raise ValueError('Unrecognized binding type {}.'.format(kind))
  tff_type = _get_type(binding)
  # Every flattened tensor must be referenced exactly once by the binding.
  if unused_shape_indexes:
    raise ValueError(
        'Binding fails to capture tensors {}.'.format(unused_shape_indexes))
  return tff_type
def flatten_xla_shape(xla_shape):
  """Flattens a possibly nested tuple XLA shape into a list of tensor shapes.

  Args:
    xla_shape: An instance of `xla_client.Shape` (could be a nested structure).

  Returns:
    A Python list of `xla_client.Shape` instances representing tensors.
  """
  py_typecheck.check_type(xla_shape, xla_client.Shape)
  if not xla_shape.is_tuple():
    # A leaf must be a tensor (array) shape; there is no direct predicate for
    # this, so probe for `element_type` and `dimensions` instead.
    py_typecheck.check_type(xla_shape.element_type(), np.dtype)
    py_typecheck.check_type(xla_shape.dimensions(), tuple)
    return [xla_shape]
  flattened = []
  for sub_shape in xla_shape.tuple_shapes():
    flattened.extend(flatten_xla_shape(sub_shape))
  return flattened
| |
"""
sphinx.ext.imgmath
~~~~~~~~~~~~~~~~~~
Render math in HTML via dvipng or dvisvgm.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import posixpath
import re
import shutil
import subprocess
import sys
import tempfile
from os import path
from subprocess import PIPE, CalledProcessError
from typing import Any, Dict, List, Tuple
from docutils import nodes
from docutils.nodes import Element
import sphinx
from sphinx import package_dir
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import Config
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging, sha1
from sphinx.util.math import get_node_equation_number, wrap_displaymath
from sphinx.util.osutil import ensuredir
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.template import LaTeXRenderer
from sphinx.writers.html import HTMLTranslator
logger = logging.getLogger(__name__)
templates_path = path.join(package_dir, 'templates', 'imgmath')
class MathExtError(SphinxError):
    """Raised when LaTeX/converter output could not be produced for a formula."""
    category = 'Math extension error'

    def __init__(self, msg: str, stderr: bytes = None, stdout: bytes = None) -> None:
        # Append any captured tool output to the message for diagnostics.
        encoding = sys.getdefaultencoding()
        if stderr:
            msg += '\n[stderr]\n' + stderr.decode(encoding, 'replace')
        if stdout:
            msg += '\n[stdout]\n' + stdout.decode(encoding, 'replace')
        super().__init__(msg)
class InvokeError(SphinxError):
    """Raised when an external converter (latex/dvipng/dvisvgm) cannot be invoked."""
# Image formats the extension can emit.
SUPPORT_FORMAT = ('png', 'svg')
# Parses the "[N depth=D]" lines dvipng --depth prints on stdout.
depth_re = re.compile(r'\[\d+ depth=(-?\d+)\]')
# Parses the ", depth=...pt" info dvisvgm prints on stderr.
depthsvg_re = re.compile(r'.*, depth=(.*)pt')
# Matches the "<!-- DEPTH=N -->" comment appended by write_svg_depth().
depthsvgcomment_re = re.compile(r'<!-- DEPTH=(-?\d+) -->')
def read_svg_depth(filename: str) -> int:
    """Read the depth from the comment at the last line of an SVG file.

    Returns None when the file carries no depth comment, or is empty.
    """
    last_line = None
    with open(filename) as f:
        # Only the last line is checked (write_svg_depth appends it there).
        for last_line in f:
            pass
    if last_line is None:
        # Bug fix: an empty file previously raised NameError because the loop
        # variable was never bound.
        return None
    matched = depthsvgcomment_re.match(last_line)
    if matched:
        return int(matched.group(1))
    return None
def write_svg_depth(filename: str, depth: int) -> None:
    """Append the depth to an SVG file as a trailing comment."""
    with open(filename, 'a') as svg_file:
        svg_file.write('\n<!-- DEPTH=%s -->' % depth)
def generate_latex_macro(image_format: str,
                         math: str, config: Config, confdir: str = '') -> str:
    """Render the LaTeX document wrapping *math* from a template."""
    variables = {
        'fontsize': config.imgmath_font_size,
        'baselineskip': int(round(config.imgmath_font_size * 1.2)),
        'preamble': config.imgmath_latex_preamble,
        'tightpage': '' if image_format == 'png' else ',tightpage',
        'math': math
    }

    template_name = 'preview.tex_t' if config.imgmath_use_preview else 'template.tex_t'

    # A project-local template (in any configured templates_path) overrides
    # the one bundled with Sphinx.
    for template_dir in config.templates_path:
        candidate = path.join(confdir, template_dir, template_name)
        if path.exists(candidate):
            return LaTeXRenderer().render(candidate, variables)

    return LaTeXRenderer(templates_path).render(template_name, variables)
def ensure_tempdir(builder: Builder) -> str:
    """Return the builder's shared temporary directory, creating it on first use.

    A single tempdir per build is cleaner than individual temporary files:
    everything can be removed at once by cleanup_tempdir().
    """
    tempdir = getattr(builder, '_imgmath_tempdir', None)
    if tempdir is None:
        tempdir = builder._imgmath_tempdir = tempfile.mkdtemp()  # type: ignore
    return tempdir
def compile_math(latex: str, builder: Builder) -> str:
    """Compile LaTeX macros for math to DVI.
    Args:
        latex: Complete LaTeX source for the math snippet.
        builder: Active builder; supplies config and the shared tempdir.
    Returns:
        Path of the generated ``math.dvi`` inside the build tempdir.
    Raises:
        InvokeError: if the latex executable cannot be run at all.
        MathExtError: if latex runs but exits with an error.
    """
    tempdir = ensure_tempdir(builder)
    filename = path.join(tempdir, 'math.tex')
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(latex)
    # build latex command; old versions of latex don't have the
    # --output-directory option, so we have to manually chdir to the
    # temp dir to run it.
    command = [builder.config.imgmath_latex, '--interaction=nonstopmode']
    # add custom args from the config file
    command.extend(builder.config.imgmath_latex_args)
    command.append('math.tex')
    try:
        subprocess.run(command, stdout=PIPE, stderr=PIPE, cwd=tempdir, check=True)
        return path.join(tempdir, 'math.dvi')
    except OSError as exc:
        # The latex binary is missing or not executable: warn instead of
        # failing the build; the caller records the failure and falls back.
        logger.warning(__('LaTeX command %r cannot be run (needed for math '
                          'display), check the imgmath_latex setting'),
                       builder.config.imgmath_latex)
        raise InvokeError from exc
    except CalledProcessError as exc:
        raise MathExtError('latex exited with error', exc.stderr, exc.stdout) from exc
def convert_dvi_to_image(command: List[str], name: str) -> Tuple[str, str]:
    """Run *command* to convert a DVI file, returning (stdout, stderr)."""
    try:
        proc = subprocess.run(command, stdout=PIPE, stderr=PIPE, check=True, encoding='ascii')
    except OSError as exc:
        # The converter binary is missing or not executable: warn, don't fail.
        logger.warning(__('%s command %r cannot be run (needed for math '
                          'display), check the imgmath_%s setting'),
                       name, command[0], name)
        raise InvokeError from exc
    except CalledProcessError as exc:
        raise MathExtError('%s exited with error' % name, exc.stderr, exc.stdout) from exc
    return proc.stdout, proc.stderr
def convert_dvi_to_png(dvipath: str, builder: Builder) -> Tuple[str, int]:
    """Convert a DVI file to a PNG image using dvipng.

    Returns the output filename and, when preview_latex is enabled, the
    baseline depth parsed from dvipng's stdout (None otherwise).
    """
    pngfile = path.join(ensure_tempdir(builder), 'math.png')
    use_preview = builder.config.imgmath_use_preview

    command = [builder.config.imgmath_dvipng, '-o', pngfile, '-T', 'tight', '-z9']
    command.extend(builder.config.imgmath_dvipng_args)
    if use_preview:
        command.append('--depth')
    command.append(dvipath)

    stdout, _ = convert_dvi_to_image(command, 'dvipng')

    depth = None
    if use_preview:
        # dvipng reports "[N depth=D]" lines on stdout; keep the first match
        # and persist it alongside the image for later cache hits.
        for line in stdout.splitlines():
            found = depth_re.match(line)
            if found:
                depth = int(found.group(1))
                write_png_depth(pngfile, depth)
                break

    return pngfile, depth
def convert_dvi_to_svg(dvipath: str, builder: Builder) -> Tuple[str, int]:
    """Convert a DVI file to an SVG image using dvisvgm.

    Returns the output filename and, when preview_latex is enabled, the
    baseline depth parsed from dvisvgm's stderr (None otherwise).
    """
    svgfile = path.join(ensure_tempdir(builder), 'math.svg')

    command = [builder.config.imgmath_dvisvgm, '-o', svgfile]
    command.extend(builder.config.imgmath_dvisvgm_args)
    command.append(dvipath)

    _, stderr = convert_dvi_to_image(command, 'dvisvgm')

    depth = None
    if builder.config.imgmath_use_preview:
        # dvisvgm prints the depth on stderr (not stdout!), in TeX points.
        for line in stderr.splitlines():
            found = depthsvg_re.match(line)
            if found:
                depth = round(float(found.group(1)) * 100 / 72.27)  # assume 100ppi
                write_svg_depth(svgfile, depth)
                break

    return svgfile, depth
def render_math(self: HTMLTranslator, math: str) -> Tuple[str, int]:
    """Render the LaTeX math expression *math* using latex and dvipng or
    dvisvgm.
    Return the filename relative to the built document and the "depth",
    that is, the distance of image bottom and baseline in pixels, if the
    option to use preview_latex is switched on.
    Error handling may seem strange, but follows a pattern: if LaTeX or dvipng
    (dvisvgm) aren't available, only a warning is generated (since that enables
    people on machines without these programs to at least build the rest of the
    docs successfully). If the programs are there, however, they may not fail
    since that indicates a problem in the math source.
    """
    image_format = self.builder.config.imgmath_image_format.lower()
    if image_format not in SUPPORT_FORMAT:
        raise MathExtError('imgmath_image_format must be either "png" or "svg"')
    # The image filename is a content hash of the full LaTeX source, so the
    # same formula is only ever rendered once per output directory.
    latex = generate_latex_macro(image_format,
                                 math,
                                 self.builder.config,
                                 self.builder.confdir)
    filename = "%s.%s" % (sha1(latex.encode()).hexdigest(), image_format)
    relfn = posixpath.join(self.builder.imgpath, 'math', filename)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
    # Cache hit: reuse the existing image and its stored baseline depth.
    if path.isfile(outfn):
        if image_format == 'png':
            depth = read_png_depth(outfn)
        elif image_format == 'svg':
            depth = read_svg_depth(outfn)
        return relfn, depth
    # if latex or dvipng (dvisvgm) has failed once, don't bother to try again
    if hasattr(self.builder, '_imgmath_warned_latex') or \
       hasattr(self.builder, '_imgmath_warned_image_translator'):
        return None, None
    # .tex -> .dvi
    try:
        dvipath = compile_math(latex, self.builder)
    except InvokeError:
        # latex missing: remember so we warn only once per build.
        self.builder._imgmath_warned_latex = True  # type: ignore
        return None, None
    # .dvi -> .png/.svg
    try:
        if image_format == 'png':
            imgpath, depth = convert_dvi_to_png(dvipath, self.builder)
        elif image_format == 'svg':
            imgpath, depth = convert_dvi_to_svg(dvipath, self.builder)
    except InvokeError:
        # converter missing: remember so we warn only once per build.
        self.builder._imgmath_warned_image_translator = True  # type: ignore
        return None, None
    # Move generated image on tempdir to build dir
    ensuredir(path.dirname(outfn))
    shutil.move(imgpath, outfn)
    return relfn, depth
def cleanup_tempdir(app: Sphinx, exc: Exception) -> None:
    """Remove the per-build image tempdir once the build finishes successfully."""
    if exc:
        # Leave the tempdir behind on a failed build for inspection.
        return
    if not hasattr(app.builder, '_imgmath_tempdir'):
        return
    try:
        # Bug fix: this previously removed "_mathpng_tempdir", an attribute
        # that is never set anywhere -- ensure_tempdir() stores the path as
        # "_imgmath_tempdir" -- so the temporary directory leaked every build.
        shutil.rmtree(app.builder._imgmath_tempdir)  # type: ignore
    except Exception:
        # Best effort: failing to clean up must not fail the build.
        pass
def get_tooltip(self: HTMLTranslator, node: Element) -> str:
    """Return an alt-attribute fragment for *node*, or '' when tooltips are off."""
    if not self.builder.config.imgmath_add_tooltips:
        return ''
    return ' alt="%s"' % self.encode(node.astext()).strip()
def html_visit_math(self: HTMLTranslator, node: nodes.math) -> None:
    """Render an inline math node as an image, falling back to plain text."""
    try:
        fname, depth = render_math(self, '$' + node.astext() + '$')
    except MathExtError as exc:
        msg = str(exc)
        sm = nodes.system_message(msg, type='WARNING', level=2,
                                  backrefs=[], source=node.astext())
        sm.walkabout(self)
        # Bug fix: this handler renders *inline* math, but the old message
        # said "display latex" (labels were swapped with the displaymath
        # handler), making warnings point at the wrong kind of node.
        logger.warning(__('inline latex %r: %s'), node.astext(), msg)
        raise nodes.SkipNode from exc
    if fname is None:
        # something failed -- use text-only as a bad substitute
        self.body.append('<span class="math">%s</span>' %
                         self.encode(node.astext()).strip())
    else:
        c = ('<img class="math" src="%s"' % fname) + get_tooltip(self, node)
        if depth is not None:
            # Shift the image down so its baseline lines up with the text.
            c += ' style="vertical-align: %dpx"' % (-depth)
        self.body.append(c + '/>')
    raise nodes.SkipNode
def html_visit_displaymath(self: HTMLTranslator, node: nodes.math_block) -> None:
    """Render a display (block) math node as an image, with equation number."""
    if node['nowrap']:
        latex = node.astext()
    else:
        latex = wrap_displaymath(node.astext(), None, False)
    try:
        fname, depth = render_math(self, latex)
    except MathExtError as exc:
        msg = str(exc)
        sm = nodes.system_message(msg, type='WARNING', level=2,
                                  backrefs=[], source=node.astext())
        sm.walkabout(self)
        # Bug fix: this handler renders *display* math, but the old message
        # said "inline latex" (labels were swapped with the inline handler).
        logger.warning(__('display latex %r: %s'), node.astext(), msg)
        raise nodes.SkipNode from exc
    self.body.append(self.starttag(node, 'div', CLASS='math'))
    self.body.append('<p>')
    if node['number']:
        number = get_node_equation_number(self, node)
        self.body.append('<span class="eqno">(%s)' % number)
        self.add_permalink_ref(node, _('Permalink to this equation'))
        self.body.append('</span>')
    if fname is None:
        # something failed -- use text-only as a bad substitute
        self.body.append('<span class="math">%s</span></p>\n</div>' %
                         self.encode(node.astext()).strip())
    else:
        self.body.append(('<img src="%s"' % fname) + get_tooltip(self, node) +
                         '/></p>\n</div>')
    raise nodes.SkipNode
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the imgmath HTML math renderer and its configuration values."""
    app.add_html_math_renderer('imgmath',
                               (html_visit_math, None),
                               (html_visit_displaymath, None))
    # 'png' (dvipng) or 'svg' (dvisvgm); validated in render_math().
    app.add_config_value('imgmath_image_format', 'png', 'html')
    app.add_config_value('imgmath_dvipng', 'dvipng', 'html')
    app.add_config_value('imgmath_dvisvgm', 'dvisvgm', 'html')
    app.add_config_value('imgmath_latex', 'latex', 'html')
    # When True, use the LaTeX preview package to compute baseline depths.
    app.add_config_value('imgmath_use_preview', False, 'html')
    app.add_config_value('imgmath_dvipng_args',
                         ['-gamma', '1.5', '-D', '110', '-bg', 'Transparent'],
                         'html')
    app.add_config_value('imgmath_dvisvgm_args', ['--no-fonts'], 'html')
    app.add_config_value('imgmath_latex_args', [], 'html')
    app.add_config_value('imgmath_latex_preamble', '', 'html')
    app.add_config_value('imgmath_add_tooltips', True, 'html')
    app.add_config_value('imgmath_font_size', 12, 'html')
    # Remove the shared image tempdir when the build completes.
    app.connect('build-finished', cleanup_tempdir)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| |
"""
TODO
"""
import abc, os
import tensorflow as tf
import numpy as np
from .misc_utils import get_logger
logger = get_logger(__name__)
class Model(metaclass=abc.ABCMeta):
    """Abstract base class for trainable models.

    Subclasses implement training, evaluation, and (de)serialization; the
    base class only tracks whether the model has been trained yet.
    (Replaces the previous placeholder "TODO" docstrings.)
    """
    def __init__(self):
        # Expected to be flipped to True by subclasses once train() completes.
        self.is_trained = False
    @abc.abstractmethod
    def train(self, feature_tensor, correct):
        """Fit the model on `feature_tensor` against the `correct` labels."""
        raise NotImplementedError("This class must be subclassed")
    @abc.abstractmethod
    def test(self, feature_tensor, correct):
        """Evaluate the model on `feature_tensor` against the `correct` labels."""
        raise NotImplementedError("This class must be subclassed")
    @abc.abstractmethod
    def store(self, store_path, session):
        """Persist the model parameters to `store_path` using `session`."""
        raise NotImplementedError("This class must be subclassed")
    @abc.abstractmethod
    def restore(self, store_path, session):
        """Load previously stored model parameters from `store_path` using `session`."""
        raise NotImplementedError("This class must be subclassed")
def test_cnn():
    # Factory used by the test suite: a CNN with all-default hyperparameters.
    return CNN()
import numpy as np
def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset.

    Args:
        data: An indexable sequence of examples.
        batch_size: Maximum number of examples per yielded batch.
        num_epochs: Number of passes over the data; each epoch is reshuffled.

    Yields:
        numpy arrays of up to `batch_size` examples each.
    """
    data = np.array(data)
    data_size = len(data)
    # Bug fix: the old `int(len(data)/batch_size) + 1` yielded a spurious
    # empty batch whenever data_size was an exact multiple of batch_size.
    # Ceiling division gives the correct batch count in all cases.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffle_indices = np.random.permutation(np.arange(data_size))  # pylint:disable=E1101
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
class CNN():
    """Convolutional text classifier built on the pre-1.0 TensorFlow API.

    Owns a private ``tf.Graph``/``tf.Session`` pair, instantiates a
    :class:`TextCNN` inside it, and wires up Adagrad training, loss and
    accuracy summaries, and a checkpoint ``Saver``.  ``train`` and
    ``test`` accept 3-D feature tensors of shape (n, 1, max_words) and
    dense integer class labels.
    """
    def __init__(self,
                 n_features=20,
                 n_classes=3,
                 embedding_dim=300,
                 filter_sizes=[3,4,5],
                 num_filters=128,
                 dropout_keep_prob=0,
                 l2_reg_lambda=0,
                 batch_size=64,
                 num_epochs=10,
                 evaluate_every=100,
                 checkpoint_every=100,
                 allow_soft_placement=True,
                 log_device_placement=False,
                 max_words_in_sentence=20,
                 vocab_size=300000,
                 store_path=None,
                 word2vec_path=None,
                 name=None):
        # NOTE(review): ``filter_sizes=[3,4,5]`` is a shared mutable
        # default, and ``dropout_keep_prob=0`` would keep no units during
        # training — confirm both are intended.
        self.embedding_dim = embedding_dim
        self.filter_sizes = filter_sizes
        self.num_filters = num_filters
        self.dropout_keep_prob = dropout_keep_prob
        self.l2_reg_lambda = l2_reg_lambda
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.evaluate_every = evaluate_every
        self.checkpoint_every = checkpoint_every
        self.allow_soft_placement = allow_soft_placement
        self.log_device_placement = log_device_placement
        self.max_words_in_sentence = max_words_in_sentence
        self.n_features = n_features
        self.n_classes = n_classes
        self.vocab_size = vocab_size
        self.graph = tf.Graph()
        self.store_path = store_path
        self.word2vec_path = word2vec_path
        # Seed the embedding layer either from a pre-trained word2vec model
        # (truncated to the vocabulary size) or from random noise.
        if self.word2vec_path is not None:
            self.embedding_matrix = Word2Vec(self.word2vec_path, 'connective_token').data.syn0[:self.vocab_size]
        else:
            self.embedding_matrix = np.random.randn(vocab_size, embedding_dim)
        logger.debug("Embedding matrix shape: {}".format(self.embedding_matrix.shape))
        with self.graph.as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=self.allow_soft_placement,
                                          log_device_placement=self.log_device_placement)
            sess = tf.Session(config=session_conf)
            self.sess = sess
            with self.sess.as_default():
                self.cnn = TextCNN(
                    sequence_length=self.max_words_in_sentence,
                    num_classes=self.n_classes,
                    vocab_size=self.vocab_size,
                    embedding_size=self.embedding_dim,
                    filter_sizes=self.filter_sizes,
                    num_filters=self.num_filters,
                    l2_reg_lambda=self.l2_reg_lambda)
                # Define Training procedure
                self.global_step = tf.Variable(0, name="global_step", trainable=False)
                optimizer = tf.train.AdagradOptimizer(0.01)
                grads_and_vars = optimizer.compute_gradients(self.cnn.loss)
                self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
                # # Keep track of gradient values and sparsity (optional)
                # grad_summaries = []
                # for g, v in grads_and_vars:
                #     if g is not None:
                #         grad_hist_summary = tf.histogram_summary("{}/grad/hist".format(v.name), g)
                #         sparsity_summary = tf.scalar_summary("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                #         grad_summaries.append(grad_hist_summary)
                #         grad_summaries.append(sparsity_summary)
                # grad_summaries_merged = tf.merge_summary(grad_summaries)
                # Output directory for models and summaries
                timestamp = str(int(time.time()))
                out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
                print("Writing to {}\n".format(out_dir))
                # Summaries for loss and accuracy
                loss_summary = tf.scalar_summary("loss", self.cnn.loss)
                acc_summary = tf.scalar_summary("accuracy", self.cnn.accuracy)
                # Train Summaries
                self.train_summary_op = tf.merge_summary([loss_summary, acc_summary])
                train_summary_dir = os.path.join(out_dir, "summaries", "train")
                self.train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph_def)
                # Dev summaries
                self.dev_summary_op = tf.merge_summary([loss_summary, acc_summary])
                dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
                self.dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph_def)
                # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
                checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
                self.checkpoint_prefix = os.path.join(checkpoint_dir, "model")
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                self.saver = tf.train.Saver(tf.all_variables())
                # Initialize all variables; the embedding placeholder is fed
                # here so the embedding variable starts from our matrix.
                feed = {self.cnn.embedding_matrix: self.embedding_matrix}
                sess.run(tf.initialize_all_variables(), feed_dict=feed)

    def train_step(self, x_batch, y_batch):
        """
        A single training step: one optimizer update on one minibatch,
        logging loss/accuracy and recording a train summary.
        """
        feed_dict = {
            self.cnn.input_x: x_batch,
            self.cnn.input_y: y_batch,
            self.cnn.dropout_keep_prob: self.dropout_keep_prob,
        }
        _, step, summaries, loss, accuracy = self.sess.run([self.train_op, self.global_step, self.train_summary_op, self.cnn.loss, self.cnn.accuracy],
                                                           feed_dict)
        time_str = datetime.datetime.now().isoformat()
        print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
        self.train_summary_writer.add_summary(summaries, step)

    def dev_step(self, x_batch, y_batch, writer=None):
        """
        Evaluates model on a dev set.  Dropout is disabled (keep prob 1.0);
        if *writer* is given the dev summary is recorded there.
        """
        with self.graph.as_default(), self.sess.as_default():
            feed_dict = {
                self.cnn.input_x: x_batch,
                self.cnn.input_y: y_batch,
                self.cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy = self.sess.run([self.global_step, self.dev_summary_op, self.cnn.loss, self.cnn.accuracy],
                                                            feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            if writer:
                writer.add_summary(summaries, step)

    def train(self, feature_tensor, correct):
        """Train on *feature_tensor*, holding out the last 1000 rows as a
        dev set, and optionally checkpoint to ``self.store_path``."""
        feature_tensor = np.squeeze(feature_tensor, axis=1)
        correct = self.massage_answers(correct)
        with self.graph.as_default(), self.sess.as_default():
            # Fixed-size holdout split; assumes > 1000 training instances.
            x_train, x_dev = feature_tensor[:-1000], feature_tensor[-1000:]
            y_train, y_dev = correct[:-1000], correct[-1000:]
            # Generate batches
            batches = batch_iter(list(zip(x_train, y_train)), self.batch_size, self.num_epochs)
            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.train_step(x_batch, y_batch)
                current_step = tf.train.global_step(self.sess, self.global_step)
                if current_step % self.evaluate_every == 0:
                    print("\nEvaluation:")
                    self.dev_step(x_dev, y_dev, writer=self.dev_summary_writer)
                    print("")
                if current_step % self.checkpoint_every == 0:
                    #path = self.saver.save(self.sess, self.checkpoint_prefix, global_step=current_step)
                    #print("Saved model checkpoint to {}\n".format(path))
                    pass
            if self.store_path:
                self.store(self.store_path, self.sess)

    def test(self, feature_tensor):
        """Restore the checkpoint and return predicted class indices for
        every row of *feature_tensor* (dropout disabled)."""
        feature_tensor = np.squeeze(feature_tensor, axis=1) # Assume 3-dim tensor, need to be 2-dim
        with self.graph.as_default(), self.sess.as_default():
            x_test = feature_tensor
            self.restore(self.store_path, self.sess)
            # Single forward pass over the whole test set (no batching).
            feed_dict = {
                self.cnn.input_x: x_test,
                self.cnn.dropout_keep_prob: 1.0,
            }
            step, predictions = self.sess.run([self.global_step, self.cnn.predictions], feed_dict)
            return predictions

    def massage_answers(self, correct):
        """Convert a dense label vector into a (n, n_classes) one-hot matrix."""
        labels_dense = np.array(correct)
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * self.n_classes
        labels_one_hot = np.zeros((num_labels, self.n_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        logger.debug("Converted dense vector to onehot with shape {}".format(labels_one_hot.shape))
        return labels_one_hot

    def restore(self, store_path, session):
        """Restore all graph variables into *session* from *store_path*."""
        saver = tf.train.Saver()
        saver.restore(session, store_path)
        logger.info("Restored model from {}".format(store_path))

    def store(self, store_path, session):
        """Persist all graph variables of *session* at *store_path*,
        creating the parent directory if needed."""
        os.makedirs(os.path.join(*store_path.split("/")[:-1]), exist_ok=True)
        saver = tf.train.Saver()
        saver.save(session, store_path)
        logger.debug("Stored model at {}".format(store_path))
from .extractors import Word2Vec
import time
import datetime
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.

    Parameters
    ----------
    sequence_length : tokens per (padded) input sentence
    num_classes : number of output classes
    vocab_size, embedding_size : dimensions of the embedding table
    filter_sizes : convolution window heights, in tokens
    num_filters : number of filters per window size
    l2_reg_lambda : weight of the L2 penalty on the output layer
    """
    def __init__(
        self, sequence_length, num_classes, vocab_size,
        embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)
        # Embedding layer, pinned to the CPU
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # The table's initial values are fed through this placeholder
            # when the variables are initialized (see CNN.__init__).
            self.embedding_matrix = tf.placeholder(tf.float32, shape=(vocab_size, embedding_size))
            W = tf.Variable(self.embedding_matrix, name="W")
            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs (one value per filter)
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(3, pooled_outputs)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
from sklearn import svm
from sklearn.externals import joblib
class SVM(Model):
    """Support-vector-machine classifier backed by scikit-learn.

    Trained models are persisted/loaded with ``joblib``.  Input feature
    tensors are 3-D with a singleton middle axis, squeezed before use.
    """

    def __init__(self, n_features, n_classes, kernel, store_path=None, name=None):
        """
        Parameters
        ----------
        n_features: width of the input feature vectors
        n_classes: number of output classes
        kernel: kernel name passed to ``svm.SVC``
        store_path: optional path for persisting the fitted model
        name: optional human-readable model name
        """
        # Bug fix: the base-class constructor was never invoked, so
        # ``self.is_trained`` (set by Model.__init__) was missing.
        super(SVM, self).__init__()
        self.kernel = kernel
        self.n_features = n_features
        self.n_classes = n_classes
        self.store_path = store_path
        self.model = None
        self.name = name

    def restore(self, store_path):
        """Load and return a previously stored model from *store_path*."""
        logger.info("Restoring model from {}".format(store_path))
        return joblib.load(store_path)

    def store(self, model, store_path):
        """Persist *model* at *store_path* via joblib."""
        logger.info("Storing model at {}".format(store_path))
        return joblib.dump(model, store_path)

    def train(self, feature_tensor, correct):
        """Fit an SVC on *feature_tensor* (squeezed to 2-D) and labels
        *correct*, storing the model if a store path was configured."""
        logger.info("Training model...")
        squeezed = feature_tensor.squeeze(axis=1)
        clf = svm.SVC(kernel=self.kernel)
        model = clf.fit(squeezed, correct)
        if self.store_path:
            self.store(model, self.store_path)
        self.model = model
        # Keep the Model contract's flag in sync with reality.
        self.is_trained = True
        logger.info("Training session done")

    def test(self, feature_tensor):
        """Reload the stored model and return its predictions for
        *feature_tensor* (squeezed to 2-D)."""
        logger.info("Loading model...")
        self.model = self.restore(self.store_path)
        logger.info("Testing model...")
        squeezed = feature_tensor.squeeze(axis=1)
        return self.model.predict(squeezed)
class LogisticRegression(Model):
    """
    Simple logreg model just to have some sort of baseline.

    A single weight matrix maps the (squeezed) feature vector to class
    scores; training minimizes softmax cross-entropy with plain SGD.
    """
    def __init__(self, n_features, n_classes, batch_size, epochs, store_path=None, name=None):
        """Build the small TF graph holding the placeholders and weights.

        Parameters
        ----------
        n_features: width of the input feature vectors
        n_classes: number of output classes
        batch_size: minibatch size used by ``train``
        epochs: number of passes over the training data
        store_path: optional checkpoint path used by store/restore
        name: optional human-readable model name
        """
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.n_features = n_features
            self.n_classes = n_classes
            self.batch_size = batch_size
            # Inputs arrive as (n, 1, n_features); the middle axis is
            # squeezed away before the matmul.
            self.train_x = tf.placeholder(tf.float32, shape=(None, 1, n_features), name='x')
            self.train_y = tf.placeholder(tf.float32, shape=[None, n_classes], name='y')
            self.weights = tf.Variable(tf.random_normal((n_features, n_classes), stddev=0.01),
                                       name='weights')
            self.epochs = epochs
            self.store_path = store_path
            self.name = name
        super(LogisticRegression, self).__init__()

    def restore(self, store_path, session):
        """Restore the graph's variables into *session* from *store_path*."""
        saver = tf.train.Saver()
        saver.restore(session, store_path)
        logger.info("Restored model from {}".format(store_path))

    def store(self, store_path, session):
        """Persist the weight matrix of *session* at *store_path*,
        creating the parent directory if needed."""
        os.makedirs(os.path.join(*store_path.split("/")[:-1]), exist_ok=True)
        saver = tf.train.Saver([self.weights])
        saver.save(session, store_path)
        logger.debug("Stored model at {}".format(store_path))

    def massage_answers(self, correct):
        """Convert a dense label vector into a (n, n_classes) one-hot matrix."""
        labels_dense = np.array(correct)
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * self.n_classes
        labels_one_hot = np.zeros((num_labels, self.n_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        logger.debug("Converted dense vector to onehot with shape {}".format(labels_one_hot.shape))
        return labels_one_hot

    def train(self, feature_tensor, correct):
        """Train on *feature_tensor* of shape (n, 1, n_features) against
        integer labels *correct*, optionally checkpointing at the end."""
        with self.graph.as_default():
            n_instances, _, _ = feature_tensor.shape
            assert n_instances == len(correct), \
                "Shape mismatch: feature tensor: {}, correct: {}".format(feature_tensor.shape,
                                                                         len(correct))
            correct_onehot = self.massage_answers(correct)
            logger.debug("Setting up training. feature_tensor has shape {},\
                         correct_onehot has shape {}".format(feature_tensor.shape,
                                                             correct_onehot.shape))
            squeezed = tf.squeeze(self.train_x, squeeze_dims=[1])  # Assume 3-dim tensor, need to be 2-dim
            p_y_given_x = tf.matmul(squeezed, self.weights, name='p_y_given_x')
            cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(p_y_given_x, self.train_y))
            train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
            # NOTE(review): zip(start_i, end_i) silently drops the trailing
            # partial batch when n_instances is not a multiple of batch_size.
            start_i = range(0, n_instances, self.batch_size)
            end_i = range(self.batch_size, n_instances, self.batch_size)
            with tf.Session() as sess:
                init = tf.initialize_all_variables()
                sess.run(init)
                logger.info("Starting training session...")
                for epoch_i in range(self.epochs):
                    logger.debug("Epoch: {}".format(epoch_i))
                    for start, end in zip(start_i, end_i):
                        logger.debug("Batch {}-{}".format(start, end))
                        sess.run(train_op, feed_dict={self.train_x: feature_tensor[start:end],
                                                      self.train_y: correct_onehot[start:end]})
                if self.store_path:
                    self.store(self.store_path, sess)
                logger.info("Training session done.")

    def test(self, feature_tensor):
        """Predict class indices for *feature_tensor* using the stored
        weights from ``self.store_path``."""
        with self.graph.as_default():
            squeezed = tf.squeeze(self.train_x, squeeze_dims=[1])  # Assume 3-dim tensor, need to be 2-dim
            p_y_given_x = tf.matmul(squeezed, self.weights)
            predicted_classes = tf.argmax(p_y_given_x, 1)
            with tf.Session() as sess:
                # Bug fix: initialize *before* restoring.  The original code
                # ran the initializer after Saver.restore, which overwrote
                # the checkpointed weights with fresh random values and made
                # predictions effectively random.
                init = tf.initialize_all_variables()
                sess.run(init)
                self.restore(self.store_path, sess)
                predicted_classes = sess.run(predicted_classes, feed_dict={self.train_x: feature_tensor})
            return predicted_classes
| |
import unittest
import numpy as np
import numpy.testing as np_test
from pgmpy.inference import VariableElimination
from pgmpy.inference import BeliefPropagation
from pgmpy.models import BayesianModel
from pgmpy.models import JunctionTree
from pgmpy.factors import TabularCPD
from pgmpy.factors import Factor
class TestVariableElimination(unittest.TestCase):
    """Exact-inference tests for VariableElimination on a small
    hand-built Bayesian network (A, R -> J -> Q; G, J -> L)."""

    def setUp(self):
        # Toy network: J depends on A and R, Q on J, L on G and J.
        self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
                                             ('J', 'L'), ('G', 'L')])
        cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
        cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
        cpd_j = TabularCPD('J', 2,
                           [[0.9, 0.6, 0.7, 0.1],
                            [0.1, 0.4, 0.3, 0.9]],
                           ['R', 'A'], [2, 2])
        cpd_q = TabularCPD('Q', 2,
                           [[0.9, 0.2],
                            [0.1, 0.8]],
                           ['J'], [2])
        cpd_l = TabularCPD('L', 2,
                           [[0.9, 0.45, 0.8, 0.1],
                            [0.1, 0.55, 0.2, 0.9]],
                           ['G', 'J'], [2, 2])
        cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
        self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r)
        self.bayesian_inference = VariableElimination(self.bayesian_model)

    # All the values that are used for comparison in all the tests are
    # found using SAMIAM (assuming that it is correct ;))
    def test_query_single_variable(self):
        query_result = self.bayesian_inference.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))

    def test_query_multiple_variable(self):
        query_result = self.bayesian_inference.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))

    def test_query_single_variable_with_evidence(self):
        query_result = self.bayesian_inference.query(variables=['J'],
                                                     evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))

    def test_query_multiple_variable_with_evidence(self):
        query_result = self.bayesian_inference.query(variables=['J', 'Q'],
                                                     evidence={'A': 0, 'R': 0,
                                                               'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_max_marginal(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(), 0.1659, decimal=4)

    def test_max_marginal_var(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G']), 0.5714, decimal=4)

    def test_max_marginal_var1(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R']),
                                    0.4055, decimal=4)

    def test_max_marginal_var2(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R', 'A']),
                                    0.3260, decimal=4)

    def test_map_query(self):
        map_query = self.bayesian_inference.map_query()
        self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0,
                                         'L': 0})

    def test_map_query_with_evidence(self):
        map_query = self.bayesian_inference.map_query(['A', 'R', 'L'],
                                                      {'J': 0, 'Q': 1, 'G': 0})
        self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0})

    def test_induced_graph(self):
        # Induced graph for the given elimination order; edges compared
        # order-insensitively.
        induced_graph = self.bayesian_inference.induced_graph(['G', 'Q', 'A', 'J', 'L', 'R'])
        result_edges = sorted([sorted(x) for x in induced_graph.edges()])
        self.assertEqual([['A', 'J'], ['A', 'R'], ['G', 'J'], ['G', 'L'],
                          ['J', 'L'], ['J', 'Q'], ['J', 'R'], ['L', 'R']],
                         result_edges)

    def test_induced_width(self):
        result_width = self.bayesian_inference.induced_width(['G', 'Q', 'A', 'J', 'L', 'R'])
        self.assertEqual(2, result_width)

    def tearDown(self):
        del self.bayesian_inference
        del self.bayesian_model
class TestBeliefPropagation(unittest.TestCase):
    """Belief-propagation tests on a 3-clique junction tree (calibration
    checks) and on the same Bayesian network used by
    TestVariableElimination (query/map checks)."""

    def setUp(self):
        # Chain-structured junction tree: (A,B) - (B,C) - (C,D).
        self.junction_tree = JunctionTree([(('A', 'B'), ('B', 'C')),
                                           (('B', 'C'), ('C', 'D'))])
        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))
        self.junction_tree.add_factors(phi1, phi2, phi3)
        self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
                                             ('J', 'L'), ('G', 'L')])
        cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
        cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
        cpd_j = TabularCPD('J', 2,
                           [[0.9, 0.6, 0.7, 0.1],
                            [0.1, 0.4, 0.3, 0.9]],
                           ['R', 'A'], [2, 2])
        cpd_q = TabularCPD('Q', 2,
                           [[0.9, 0.2],
                            [0.1, 0.8]],
                           ['J'], [2])
        cpd_l = TabularCPD('L', 2,
                           [[0.9, 0.45, 0.8, 0.1],
                            [0.1, 0.55, 0.2, 0.9]],
                           ['G', 'J'], [2, 2])
        cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
        self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r)

    def test_calibrate_clique_belief(self):
        # Clique beliefs after sum-product calibration must equal the
        # analytically computed products of factors and messages.
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.calibrate()
        clique_belief = belief_propagation.get_clique_beliefs()
        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))
        b_A_B = phi1 * (phi3.marginalize('D', inplace=False) * phi2).marginalize('C', inplace=False)
        b_B_C = phi2 * (phi1.marginalize('A', inplace=False) * phi3.marginalize('D', inplace=False))
        b_C_D = phi3 * (phi1.marginalize('A', inplace=False) * phi2).marginalize('B', inplace=False)
        np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values)
        np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values)
        np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values)

    def test_calibrate_sepset_belief(self):
        # Sepset beliefs are the clique beliefs marginalized onto the
        # separator variables (B and C respectively).
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.calibrate()
        sepset_belief = belief_propagation.get_sepset_beliefs()
        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))
        b_B = (phi1 * (phi3.marginalize('D', inplace=False) *
                       phi2).marginalize('C', inplace=False)).marginalize('A', inplace=False)
        b_C = (phi2 * (phi1.marginalize('A', inplace=False) *
                       phi3.marginalize('D', inplace=False))).marginalize('B', inplace=False)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values)

    def test_max_calibrate_clique_belief(self):
        # Same as the sum-product case but with max-product calibration.
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.max_calibrate()
        clique_belief = belief_propagation.get_clique_beliefs()
        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))
        b_A_B = phi1 * (phi3.maximize('D', inplace=False) * phi2).maximize('C', inplace=False)
        b_B_C = phi2 * (phi1.maximize('A', inplace=False) * phi3.maximize('D', inplace=False))
        b_C_D = phi3 * (phi1.maximize('A', inplace=False) * phi2).maximize('B', inplace=False)
        np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values)
        np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values)
        np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values)

    def test_max_calibrate_sepset_belief(self):
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.max_calibrate()
        sepset_belief = belief_propagation.get_sepset_beliefs()
        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))
        b_B = (phi1 * (phi3.maximize('D', inplace=False) *
                       phi2).maximize('C', inplace=False)).maximize('A', inplace=False)
        b_C = (phi2 * (phi1.maximize('A', inplace=False) *
                       phi3.maximize('D', inplace=False))).maximize('B', inplace=False)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values)

    # All the values that are used for comparison in all the tests are
    # found using SAMIAM (assuming that it is correct ;))
    def test_query_single_variable(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))

    def test_query_multiple_variable(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))

    def test_query_single_variable_with_evidence(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(variables=['J'],
                                                evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))

    def test_query_multiple_variable_with_evidence(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(variables=['J', 'Q'],
                                                evidence={'A': 0, 'R': 0,
                                                          'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_map_query(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        map_query = belief_propagation.map_query()
        self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0,
                                         'L': 0})

    def test_map_query_with_evidence(self):
        belief_propagation = BeliefPropagation(self.bayesian_model)
        map_query = belief_propagation.map_query(['A', 'R', 'L'],
                                                 {'J': 0, 'Q': 1, 'G': 0})
        self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0})

    def tearDown(self):
        del self.junction_tree
        del self.bayesian_model
| |
#!/usr/bin/env python
import math
import random
import pygame
import pygame.color as color
import boid
import mapparser as mp
from prm import PRMGenerator
class Configuration:
    """
    Static class that holds important global variables
    """
    ## Dimensions of the screen
    dim = xSize, ySize = 1000, 600
    ## Number of sample points to use in the PRM
    numSamplePoints = 300
    ## Defines the radius of all goals
    goalRadius = 20
    ## Maximum speed of the boids
    boidSpeed = 30
    ## Number of neighbours the boids will influence
    ## a boid's heading
    numNeighbours = 3
    ## The screen used to draw the simluation
    screen = pygame.display.set_mode(dim)
    ## The list of colors (used for debugging purposes)
    # Bug fix: materialized as a list.  On Python 3 ``map(...)`` returns a
    # one-shot, non-subscriptable iterator, which breaks the
    # ``Configuration.colorList[i]`` indexing done by initVars below.
    # ``.values()`` iterates in the same order as ``.keys()``, so the
    # resulting list is identical to the original mapping.
    colorList = list(color.THECOLORS.values())
    ## Radius (in pixels) used when drawing each boid
    boid_radius = 4
class PolyFileConfiguration(Configuration):
    """
    Extends the Configuration class. This configuration gets the
    obstacles from .map files that have been created.
    """
    def parseDynamicObstacles(self, dynamic_obstacles_fp):
        """
        Parses the obstacle map file and creates polygon objects with random
        behaviour by default. Every obstacle (static or dynamic) is then
        given its own copy of the complete obstacle list; dynamic obstacles
        remove themselves from their copy.
        """
        # parse the obstacle file and create Polygons
        if dynamic_obstacles_fp is not None:
            dyn_obstacles = mp.mparse(dynamic_obstacles_fp, self.obstacleList)
            for obst in dyn_obstacles:
                self.obstacleList.append(obst)
        # pass a copy of obstacle list to each dynamic obstacle
        for obst in self.obstacleList:
            obst.obstacles = list(self.obstacleList)  # make cpy not ref
            if obst.dynamic:
                obst.removeSelfFromObstacleList()

    def _autoGeneratedObstacleValid(self, node, nogo_zones):
        """
        Checks to see if a candidate vertex is valid: not inside (or within
        20 units of) an existing obstacle, at least 150 units away from
        every no-go zone, and at least 30 units from every vertex of the
        other candidate obstacles generated so far.
        """
        # check against obstacles
        for obst in self.obstacleList:
            res = obst.pointInPoly(node)
            if res:
                return False  # node is invalid, it is inside an obstacle
            # presumably the distance from node to the nearest point on the
            # obstacle boundary — TODO confirm against the Polygon API
            if obst.norm(node, obst.getPoint(node)) <= 20:
                return False
        # check if node is near nogo_zones
        for zone in nogo_zones:
            distance_between = math.sqrt(
                (zone[0] - node[0]) ** 2 + (zone[1] - node[1]) ** 2
            )
            if distance_between < 150:
                return False
        # check against other about-to-be obstacles (i.e. other nodes)
        # make sure they are no where near each other!
        for other_node_set in self.nodes[:-1]:
            for other_node in other_node_set:
                n_1 = list(node)
                n_2 = list(other_node)
                # pythagoras theorem
                x = n_1[0] - n_2[0]
                y = n_1[1] - n_2[1]
                dist = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
                # should be bigger than 30 units away from each other
                if dist < 30:
                    return False
        return True

    def autoGenerateDynamicObstacles(self, start_point, end_point):
        """
        Auto generate ``self.auto_gen_number`` rectangular dynamic
        obstacles at random positions, rejecting candidates that fail
        :meth:`_autoGeneratedObstacleValid`, then register them in
        ``self.obstacleList``.
        """
        width = 30
        height = 30
        top_left = [0, 0]
        top_right = [0, 0]
        bottom_left = [0, 0]
        bottom_right = [0, 0]
        self.nodes = list()
        obst_generated = 0
        obst_validated = True
        # keep generated obstacles away from the boids' start and goal
        nogo_zones = [start_point, end_point]
        while (obst_generated < self.auto_gen_number):
            # generate vertices at random co-ordinates for dynamic obstacles
            top_left[0] = random.randint(40, Configuration.xSize - 40)
            top_left[1] = random.randint(40, Configuration.ySize - 40)
            top_right = [top_left[0] + width, top_left[1]]
            # NOTE(review): y decreases here even though pygame's y axis
            # grows downward, so the "bottom" vertices end up above the
            # "top" ones on screen — confirm intended.
            bottom_right = [top_right[0], top_right[1] - height]
            bottom_left = [bottom_right[0] - width, bottom_right[1]]
            self.nodes += [[
                tuple(top_left),
                tuple(top_right),
                tuple(bottom_right),
                tuple(bottom_left)
            ]]
            # check to see if vertices lie in obstacles
            for node in self.nodes[-1]:
                if self._autoGeneratedObstacleValid(node, nogo_zones) is False:
                    obst_validated = False
                    self.nodes.pop()  # remove from nodes
                    break
                else:
                    obst_validated = True
            # if obstacle nodes are good, increment obstacles generated
            if obst_validated:
                obst_generated += 1
        # with the vertices generated create the dynamic obstacle objects
        dyn_obstacles = mp.mparse(
            None,
            self.obstacleList,
            nodes=self.nodes,
            start_point=start_point,
            end_point=end_point
        )
        for obst in dyn_obstacles:
            self.obstacleList.append(obst)
        # pass a copy of obstacle list to each dynamic obstacle
        for obst in self.obstacleList:
            obst.obstacles = list(self.obstacleList)  # make cpy not ref
            if obst.dynamic:
                obst.removeSelfFromObstacleList()

    def determinePositionInConfig(self, i, flockSize, startPoint):
        """
        Return the initial (x, y) position of boid *i*: boids are laid out
        on a near-square grid anchored at *startPoint*, spaced three boid
        radii apart.
        """
        boid_radius = Configuration.boid_radius
        init_length = math.ceil(math.sqrt(flockSize))
        down = int(i // init_length)
        accross = int(i % init_length)
        return (
            startPoint[0] + 3 * boid_radius * accross,
            startPoint[1] + 3 * boid_radius * down
        )

    def initVars(
        self,
        startPoint,
        endPoint,
        flockSize,
        **kwargs
    ):
        """
        Parses the file to get the obstacle list. Creates a PRM generator to
        create a global map of the environment. Gets the list of intermediate
        goals. Also, creates the list of boids used in the simulation
        @param startPoint The starting point for the boids
        @param endPoint The ending point for the boids
        @param flockSize The size of the flock (number of boids)
        @param kwargs Optional keys: map_file (static obstacle map, default
               "maps/m1.map"), dynamic_obstacles (dynamic obstacle map),
               auto_gen_obst (bool), auto_gen_number (int)
        """
        ## List of obstacles
        # parse static obstacles
        self.obstacleList = mp.mparse(kwargs.get("map_file", "maps/m1.map"))
        # parse dynamic obstacles
        dynamic_obstacles_fp = kwargs.get("dynamic_obstacles", None)
        self.parseDynamicObstacles(dynamic_obstacles_fp)
        # auto generate dynamic obstacles
        self.auto_gen_obst = kwargs.get("auto_gen_obst", False)
        self.auto_gen_number = kwargs.get("auto_gen_number", 0)
        if self.auto_gen_obst:
            self.autoGenerateDynamicObstacles(startPoint, endPoint)
        ## Starting point
        self.startPoint = startPoint
        ## Ending point
        self.endPoint = endPoint
        ## Object containing variables and methods for the global planner
        self.prmGen = PRMGenerator(
            startPoint,
            endPoint,
            self.obstacleList,
            Configuration.xSize,
            Configuration.ySize,
            Configuration.numSamplePoints,
            Configuration.screen
        )
        ## List of intermediate goals derived by the global planner
        self.goalList = self.prmGen.generate(Configuration.goalRadius)
        ## List of boids in the flock
        self.boidList = [
            boid.Boid(
                startPoint,
                endPoint,
                Configuration.boidSpeed,
                Configuration.xSize,
                Configuration.ySize,
                Configuration.numNeighbours,
                boid.guassianFunc,
                self.obstacleList,
                self.goalList,
                self.prmGen,
                Configuration.screen,
                Configuration.colorList[i],
                Configuration.boid_radius,
                self.determinePositionInConfig(i, flockSize, startPoint)
            ) for i in range(flockSize)
        ]
| |
import contextlib
import datetime
import uuid
import sqlalchemy as sa
from sqlalchemy import Date
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import orm
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import TypeDecorator
from sqlalchemy import util
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
def make_uuid():
    """Return a random UUID4 as a 32-character lowercase hex string."""
    generated = uuid.uuid4()
    return generated.hex
@contextlib.contextmanager
def conditional_sane_rowcount_warnings(
    update=False, delete=False, only_returning=False
):
    """Context manager that expects "rowcount not supported" warnings on
    dialects lacking sane rowcount support, and is transparent otherwise.

    ``update`` / ``delete`` select which warning messages to expect;
    ``only_returning`` checks the RETURNING-specific rowcount capability
    instead of the general one.
    """
    dialect = testing.db.dialect
    if only_returning:
        rowcount_ok = dialect.supports_sane_rowcount_returning
    else:
        rowcount_ok = dialect.supports_sane_rowcount
    if rowcount_ok:
        # Dialect reports rowcounts reliably: no warnings expected.
        yield
        return
    expected = ()
    if update:
        expected += (
            "Dialect .* does not support updated rowcount - "
            "versioning cannot be verified.",
        )
    if delete:
        expected += (
            "Dialect .* does not support deleted rowcount - "
            "versioning cannot be verified.",
        )
    with expect_warnings(*expected):
        yield
class NullVersionIdTest(fixtures.MappedTest):
    """Regression tests for issue #3673: flushing a row whose version id
    column is NULL must raise a FlushError rather than proceed."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # version_id is nullable at the schema level, so NULL versions
        # must be rejected by the ORM rather than the database.
        Table(
            "version_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer),
            Column("value", String(40), nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass

    def _fixture(self):
        """Map Foo with caller-managed version ids (no generator) and
        return a fresh session."""
        Foo, version_table = self.classes.Foo, self.tables.version_table
        self.mapper_registry.map_imperatively(
            Foo,
            version_table,
            version_id_col=version_table.c.version_id,
            version_id_generator=False,
        )
        s1 = fixture_session()
        return s1

    def test_null_version_id_insert(self):
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        # Prior to the fix for #3673, you would have been allowed to insert
        # the above record with a NULL version_id and you would have gotten
        # the following error when you tried to update it. Now you should
        # get a FlushError on the initial insert.
        #
        # A value is required for bind parameter 'version_table_version_id'
        # UPDATE version_table SET value=?
        # WHERE version_table.id = ?
        # AND version_table.version_id = ?
        # parameters: [{'version_table_id': 1, 'value': 'f1rev2'}]]
        assert_raises_message(
            sa.orm.exc.FlushError,
            "Instance does not contain a non-NULL version value",
            s1.commit,
        )

    def test_null_version_id_update(self):
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1", version_id=1)
        s1.add(f1)
        s1.commit()
        # Prior to the fix for #3673, you would have been allowed to update
        # the above record with a NULL version_id, and it would look like
        # this, post commit: Foo(id=1, value='f1rev2', version_id=None). Now
        # you should get a FlushError on update.
        f1.value = "f1rev2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            f1.version_id = None
            assert_raises_message(
                sa.orm.exc.FlushError,
                "Instance does not contain a non-NULL version value",
                s1.commit,
            )
class VersioningTest(fixtures.MappedTest):
    """Core tests for the default integer-counter versioning scheme:
    version bumps on UPDATE, StaleDataError on concurrent modification,
    version checks with SELECT..FOR UPDATE, bulk operations, and merge()
    version reconciliation."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "version_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer, nullable=False),
            Column("value", String(40), nullable=False),
            test_needs_acid=True,
        )
    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass
    def _fixture(self):
        # map Foo with the default (auto-incrementing) version id generator
        Foo, version_table = self.classes.Foo, self.tables.version_table
        self.mapper_registry.map_imperatively(
            Foo, version_table, version_id_col=version_table.c.version_id
        )
        s1 = fixture_session()
        return s1
    @engines.close_open_connections
    def test_notsane_warning(self):
        """With supports_sane_rowcount forced off, an UPDATE emits a
        SAWarning rather than raising StaleDataError."""
        Foo = self.classes.Foo
        save = testing.db.dialect.supports_sane_rowcount
        testing.db.dialect.supports_sane_rowcount = False
        try:
            s1 = self._fixture()
            f1 = Foo(value="f1")
            f2 = Foo(value="f2")
            s1.add_all((f1, f2))
            s1.commit()
            f1.value = "f1rev2"
            assert_raises(sa.exc.SAWarning, s1.commit)
        finally:
            # always restore the dialect flag for subsequent tests
            testing.db.dialect.supports_sane_rowcount = save
    def test_basic(self):
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        f2 = Foo(value="f2")
        s1.add_all((f1, f2))
        s1.commit()
        f1.value = "f1rev2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        s2 = fixture_session()
        f1_s = s2.get(Foo, f1.id)
        f1_s.value = "f1rev3"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s2.commit()
        f1.value = "f1rev3mine"
        # Only dialects with a sane rowcount can detect the
        # StaleDataError
        if testing.db.dialect.supports_sane_rowcount_returning:
            assert_raises_message(
                sa.orm.exc.StaleDataError,
                r"UPDATE statement on table 'version_table' expected "
                r"to update 1 row\(s\); 0 were matched.",
                s1.commit,
            )
            s1.rollback()
        else:
            with conditional_sane_rowcount_warnings(
                update=True, only_returning=True
            ):
                s1.commit()
        # new in 0.5 !  don't need to close the session
        f1 = s1.get(Foo, f1.id)
        f2 = s1.get(Foo, f2.id)
        f1_s.value = "f1rev4"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s2.commit()
        s1.delete(f1)
        s1.delete(f2)
        if testing.db.dialect.supports_sane_multi_rowcount:
            assert_raises_message(
                sa.orm.exc.StaleDataError,
                r"DELETE statement on table 'version_table' expected "
                r"to delete 2 row\(s\); 1 were matched.",
                s1.commit,
            )
        else:
            with conditional_sane_rowcount_warnings(delete=True):
                s1.commit()
    def test_multiple_updates(self):
        """Two rows updated in one flush each get their version bumped."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        f2 = Foo(value="f2")
        s1.add_all((f1, f2))
        s1.commit()
        f1.value = "f1rev2"
        f2.value = "f2rev2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(
            s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
            [(f1.id, "f1rev2", 2), (f2.id, "f2rev2", 2)],
        )
    def test_bulk_insert(self):
        """bulk_insert_mappings populates version_id=1 for new rows."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        s1.bulk_insert_mappings(
            Foo, [{"id": 1, "value": "f1"}, {"id": 2, "value": "f2"}]
        )
        eq_(
            s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
            [(1, "f1", 1), (2, "f2", 1)],
        )
    def test_bulk_update(self):
        """bulk_update_mappings with explicit version_id bumps versions."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        f2 = Foo(value="f2")
        s1.add_all((f1, f2))
        s1.commit()
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.bulk_update_mappings(
                Foo,
                [
                    {"id": f1.id, "value": "f1rev2", "version_id": 1},
                    {"id": f2.id, "value": "f2rev2", "version_id": 1},
                ],
            )
        s1.commit()
        eq_(
            s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
            [(f1.id, "f1rev2", 2), (f2.id, "f2rev2", 2)],
        )
    def test_bump_version(self):
        """test that version number can be bumped.
        Ensures that the UPDATE or DELETE is against the
        last committed version of version_id_col, not the modified
        state.
        """
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.commit()
        eq_(f1.version_id, 1)
        f1.version_id = 2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(f1.version_id, 2)
        # skip an id, test that history
        # is honored
        f1.version_id = 4
        f1.value = "something new"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(f1.version_id, 4)
        f1.version_id = 5
        s1.delete(f1)
        with conditional_sane_rowcount_warnings(delete=True):
            s1.commit()
        eq_(s1.query(Foo).count(), 0)
    @engines.close_open_connections
    def test_versioncheck(self):
        """query.with_lockmode performs a 'version check' on an already loaded
        instance"""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1s1 = Foo(value="f1 value")
        s1.add(f1s1)
        s1.commit()
        s2 = fixture_session()
        f1s2 = s2.get(Foo, f1s1.id)
        f1s2.value = "f1 new value"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s2.commit()
        # load, version is wrong
        assert_raises_message(
            sa.orm.exc.StaleDataError,
            r"Instance .* has version id '\d+' which does not "
            r"match database-loaded version id '\d+'",
            s1.get,
            Foo,
            f1s1.id,
            with_for_update=dict(read=True),
        )
        # reload it - this expires the old version first
        s1.refresh(f1s1, with_for_update={"read": True})
        # now assert version OK
        s1.get(Foo, f1s1.id, with_for_update=dict(read=True))
        # assert brand new load is OK too
        s1.close()
        s1.get(Foo, f1s1.id, with_for_update=dict(read=True))
    def test_versioncheck_not_versioned(self):
        """ensure the versioncheck logic skips if there isn't a
        version_id_col actually configured"""
        Foo = self.classes.Foo
        version_table = self.tables.version_table
        self.mapper_registry.map_imperatively(Foo, version_table)
        s1 = fixture_session()
        f1s1 = Foo(value="f1 value", version_id=1)
        s1.add(f1s1)
        s1.commit()
        s1.query(Foo).with_for_update(read=True).where(Foo.id == f1s1.id).one()
    @engines.close_open_connections
    @testing.requires.update_nowait
    def test_versioncheck_for_update(self):
        """query.with_lockmode performs a 'version check' on an already loaded
        instance"""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1s1 = Foo(value="f1 value")
        s1.add(f1s1)
        s1.commit()
        s2 = fixture_session()
        f1s2 = s2.get(Foo, f1s1.id)
        # not sure if I like this API
        s2.refresh(f1s2, with_for_update=True)
        f1s2.value = "f1 new value"
        assert_raises(
            exc.DBAPIError, s1.refresh, f1s1, with_for_update={"nowait": True}
        )
        s1.rollback()
        with conditional_sane_rowcount_warnings(update=True):
            s2.commit()
        s1.refresh(f1s1, with_for_update={"nowait": True})
        assert f1s1.version_id == f1s2.version_id
    def test_update_multi_missing_broken_multi_rowcount(self):
        """Simulate a driver whose rowcount is -1 for multi-row
        executemany; versioning still succeeds via single-row fallback."""
        @util.memoized_property
        def rowcount(self):
            if len(self.context.compiled_parameters) > 1:
                return -1
            else:
                return self.context.rowcount
        with patch.object(
            config.db.dialect, "supports_sane_multi_rowcount", False
        ), patch("sqlalchemy.engine.cursor.CursorResult.rowcount", rowcount):
            Foo = self.classes.Foo
            s1 = self._fixture()
            f1s1 = Foo(value="f1 value")
            s1.add(f1s1)
            s1.commit()
            f1s1.value = "f2 value"
            with conditional_sane_rowcount_warnings(
                update=True, only_returning=True
            ):
                s1.flush()
            eq_(f1s1.version_id, 2)
    def test_update_delete_no_plain_rowcount(self):
        """With no sane rowcount at all, UPDATE and DELETE each warn that
        versioning cannot be verified but still proceed."""
        with patch.object(
            config.db.dialect, "supports_sane_rowcount", False
        ), patch.object(
            config.db.dialect, "supports_sane_multi_rowcount", False
        ):
            Foo = self.classes.Foo
            s1 = self._fixture()
            f1s1 = Foo(value="f1 value")
            s1.add(f1s1)
            s1.commit()
            f1s1.value = "f2 value"
            with expect_warnings(
                "Dialect .* does not support updated rowcount - "
                "versioning cannot be verified."
            ):
                s1.flush()
            eq_(f1s1.version_id, 2)
            s1.delete(f1s1)
            with expect_warnings(
                "Dialect .* does not support deleted rowcount - "
                "versioning cannot be verified."
            ):
                s1.flush()
    @engines.close_open_connections
    def test_noversioncheck(self):
        """test query.with_lockmode works when the mapper has no version id
        col"""
        Foo, version_table = self.classes.Foo, self.tables.version_table
        s1 = fixture_session()
        self.mapper_registry.map_imperatively(Foo, version_table)
        f1s1 = Foo(value="foo", version_id=0)
        s1.add(f1s1)
        s1.commit()
        s2 = fixture_session()
        f1s2 = (
            s2.query(Foo)
            .with_for_update(read=True)
            .where(Foo.id == f1s1.id)
            .one()
        )
        assert f1s2.id == f1s1.id
        assert f1s2.value == f1s1.value
    def test_merge_no_version(self):
        """merge() without a version attribute adopts the persistent
        instance's version and allows the update."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.commit()
        f1.value = "f2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        f2 = Foo(id=f1.id, value="f3")
        f3 = s1.merge(f2)
        assert f3 is f1
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(f3.version_id, 3)
    def test_merge_correct_version(self):
        """merge() with the matching version id proceeds normally."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.commit()
        f1.value = "f2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        f2 = Foo(id=f1.id, value="f3", version_id=2)
        f3 = s1.merge(f2)
        assert f3 is f1
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(f3.version_id, 3)
    def test_merge_incorrect_version(self):
        """merge() with a stale version id raises StaleDataError."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.commit()
        f1.value = "f2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        f2 = Foo(id=f1.id, value="f3", version_id=1)
        assert_raises_message(
            orm_exc.StaleDataError,
            "Version id '1' on merged state "
            "<Foo at .*?> does not match existing version '2'. "
            "Leave the version attribute unset when "
            "merging to update the most recent version.",
            s1.merge,
            f2,
        )
    def test_merge_incorrect_version_not_in_session(self):
        """Same stale-version check applies when the target instance was
        evicted from the session (merge loads it fresh)."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.commit()
        f1.value = "f2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        f2 = Foo(id=f1.id, value="f3", version_id=1)
        s1.close()
        assert_raises_message(
            orm_exc.StaleDataError,
            "Version id '1' on merged state "
            "<Foo at .*?> does not match existing version '2'. "
            "Leave the version attribute unset when "
            "merging to update the most recent version.",
            s1.merge,
            f2,
        )
class VersionOnPostUpdateTest(fixtures.MappedTest):
    """Tests that post_update UPDATE statements participate in
    versioning: they bump the version counter and are subject to the
    same rowcount/StaleDataError checks as ordinary UPDATEs."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "node",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("version_id", Integer),
            # self-referential FK to exercise o2m / m2o post_update
            Column("parent_id", ForeignKey("node.id")),
        )
    @classmethod
    def setup_classes(cls):
        class Node(cls.Basic):
            pass
    def _fixture(self, o2m, post_update, insert=True):
        # Map Node self-referentially; direction (o2m vs m2o) is chosen
        # via remote_side, and post_update toggles the deferred UPDATE.
        Node = self.classes.Node
        node = self.tables.node
        self.mapper_registry.map_imperatively(
            Node,
            node,
            properties={
                "related": relationship(
                    Node,
                    remote_side=node.c.id if not o2m else node.c.parent_id,
                    post_update=post_update,
                )
            },
            version_id_col=node.c.version_id,
        )
        s = fixture_session()
        n1 = Node(id=1)
        n2 = Node(id=2)
        if insert:
            s.add_all([n1, n2])
            s.flush()
        return s, n1, n2
    def test_o2m_plain(self):
        # ordinary o2m: only the child row's version bumps
        s, n1, n2 = self._fixture(o2m=True, post_update=False)
        n1.related.append(n2)
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 1)
        eq_(n2.version_id, 2)
    def test_m2o_plain(self):
        # ordinary m2o: only the parent-holding row's version bumps
        s, n1, n2 = self._fixture(o2m=False, post_update=False)
        n1.related = n2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 2)
        eq_(n2.version_id, 1)
    def test_o2m_post_update(self):
        # post_update o2m yields the same version outcome as plain o2m
        s, n1, n2 = self._fixture(o2m=True, post_update=True)
        n1.related.append(n2)
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 1)
        eq_(n2.version_id, 2)
    def test_m2o_post_update(self):
        # post_update m2o yields the same version outcome as plain m2o
        s, n1, n2 = self._fixture(o2m=False, post_update=True)
        n1.related = n2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 2)
        eq_(n2.version_id, 1)
    def test_o2m_post_update_not_assoc_w_insert(self):
        # when the rows are INSERTed in the same flush, no post-update
        # version bump occurs; both stay at version 1
        s, n1, n2 = self._fixture(o2m=True, post_update=True, insert=False)
        n1.related.append(n2)
        s.add_all([n1, n2])
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 1)
        eq_(n2.version_id, 1)
    def test_m2o_post_update_not_assoc_w_insert(self):
        # same as above for the m2o direction
        s, n1, n2 = self._fixture(o2m=False, post_update=True, insert=False)
        n1.related = n2
        s.add_all([n1, n2])
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.flush()
        eq_(n1.version_id, 1)
        eq_(n2.version_id, 1)
    @testing.requires.sane_rowcount_w_returning
    def test_o2m_post_update_version_assert(self):
        """A concurrent version bump makes the post_update UPDATE match
        zero rows, raising StaleDataError."""
        Node = self.classes.Node
        s, n1, n2 = self._fixture(o2m=True, post_update=True)
        n1.related.append(n2)
        # outwit the database transaction isolation and SQLA's
        # expiration at the same time by using different Session on
        # same transaction
        s2 = Session(bind=s.connection(mapper=Node))
        s2.query(Node).filter(Node.id == n2.id).update({"version_id": 3})
        s2.commit()
        assert_raises_message(
            orm_exc.StaleDataError,
            "UPDATE statement on table 'node' expected to "
            r"update 1 row\(s\); 0 were matched.",
            s.flush,
        )
    def test_o2m_post_update_no_sane_rowcount(self):
        # without sane rowcount support, the stale post_update is only
        # reported via a warning, not StaleDataError
        Node = self.classes.Node
        s, n1, n2 = self._fixture(o2m=True, post_update=True)
        n1.related.append(n2)
        with patch.object(
            config.db.dialect, "supports_sane_rowcount", False
        ), patch.object(
            config.db.dialect, "supports_sane_multi_rowcount", False
        ):
            s2 = Session(bind=s.connection(mapper=Node))
            s2.query(Node).filter(Node.id == n2.id).update({"version_id": 3})
            s2.commit()
            with expect_warnings(
                "Dialect .* does not support updated rowcount - "
                "versioning cannot be verified."
            ):
                s.flush()
    @testing.requires.sane_rowcount_w_returning
    def test_m2o_post_update_version_assert(self):
        """Same stale post_update assertion for the m2o direction."""
        Node = self.classes.Node
        s, n1, n2 = self._fixture(o2m=False, post_update=True)
        n1.related = n2
        # outwit the database transaction isolation and SQLA's
        # expiration at the same time by using different Session on
        # same transaction
        s2 = Session(bind=s.connection(mapper=Node))
        s2.query(Node).filter(Node.id == n1.id).update({"version_id": 3})
        s2.commit()
        assert_raises_message(
            orm_exc.StaleDataError,
            "UPDATE statement on table 'node' expected to "
            r"update 1 row\(s\); 0 were matched.",
            s.flush,
        )
class NoBumpOnRelationshipTest(fixtures.MappedTest):
    """Asserts that associating a child object with a versioned parent
    does not bump the parent's version counter, for plain, functional,
    and disabled version generators."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer),
        )
        Table(
            "b",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("a_id", ForeignKey("a.id")),
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(cls.Basic):
            pass
    def _run_test(self, auto_version_counter=True):
        # shared body: create A, then attach a B; version must stay at 1
        A, B = self.classes("A", "B")
        s = fixture_session(future=True)
        if auto_version_counter:
            a1 = A()
        else:
            a1 = A(version_id=1)
        s.add(a1)
        s.commit()
        eq_(a1.version_id, 1)
        b1 = B()
        b1.a = a1
        s.add(b1)
        s.commit()
        # attaching b1 did not touch a's row, so no version bump
        eq_(a1.version_id, 1)
    def test_plain_counter(self):
        # default integer-counter version generator
        A, B = self.classes("A", "B")
        a, b = self.tables("a", "b")
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={"bs": relationship(B, backref="a")},
            version_id_col=a.c.version_id,
        )
        self.mapper_registry.map_imperatively(B, b)
        self._run_test()
    def test_functional_counter(self):
        # caller-supplied callable version generator
        A, B = self.classes("A", "B")
        a, b = self.tables("a", "b")
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={"bs": relationship(B, backref="a")},
            version_id_col=a.c.version_id,
            version_id_generator=lambda num: (num or 0) + 1,
        )
        self.mapper_registry.map_imperatively(B, b)
        self._run_test()
    def test_no_counter(self):
        # manual versioning (generator disabled); app sets version_id
        A, B = self.classes("A", "B")
        a, b = self.tables("a", "b")
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={"bs": relationship(B, backref="a")},
            version_id_col=a.c.version_id,
            version_id_generator=False,
        )
        self.mapper_registry.map_imperatively(B, b)
        self._run_test(False)
class ColumnTypeTest(fixtures.MappedTest):
    """Versioned UPDATE against a primary key that uses a TypeDecorator,
    ensuring bind processing is applied to the version-check WHERE
    criteria as well."""
    __backend__ = True
    __requires__ = ("sane_rowcount",)
    @classmethod
    def define_tables(cls, metadata):
        class SpecialType(TypeDecorator):
            # Date wrapper that asserts bind params arrive as date objects
            impl = Date
            cache_ok = True
            def process_bind_param(self, value, dialect):
                assert isinstance(value, datetime.date)
                return value
        Table(
            "version_table",
            metadata,
            Column("id", SpecialType, primary_key=True),
            Column("version_id", Integer, nullable=False),
            Column("value", String(40), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass
    def _fixture(self):
        Foo, version_table = self.classes.Foo, self.tables.version_table
        self.mapper_registry.map_imperatively(
            Foo, version_table, version_id_col=version_table.c.version_id
        )
        s1 = fixture_session()
        return s1
    @engines.close_open_connections
    def test_update(self):
        """UPDATE of a row keyed by the decorated type succeeds."""
        Foo = self.classes.Foo
        s1 = self._fixture()
        f1 = Foo(id=datetime.date.today(), value="f1")
        s1.add(f1)
        s1.commit()
        f1.value = "f1rev2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
class RowSwitchTest(fixtures.MappedTest):
    """'Row switch' scenarios (delete + re-add with same primary key in
    one flush) under integer versioning, for both a parent row and a
    cascaded child row."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "p",
            metadata,
            Column("id", String(10), primary_key=True),
            Column("version_id", Integer, default=1, nullable=False),
            Column("data", String(50)),
        )
        Table(
            "c",
            metadata,
            Column("id", String(10), ForeignKey("p.id"), primary_key=True),
            Column("version_id", Integer, default=1, nullable=False),
            Column("data", String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class P(cls.Basic):
            pass
        class C(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        p, c, C, P = cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P
        # delete-orphan cascade so replacing p.c triggers a child
        # row switch rather than an orphaned row
        cls.mapper_registry.map_imperatively(
            P,
            p,
            version_id_col=p.c.version_id,
            properties={
                "c": relationship(
                    C, uselist=False, cascade="all, delete-orphan"
                )
            },
        )
        cls.mapper_registry.map_imperatively(
            C, c, version_id_col=c.c.version_id
        )
    def test_row_switch(self):
        """delete + add with the same PK flushes as a versioned UPDATE."""
        P = self.classes.P
        session = fixture_session()
        session.add(P(id="P1", data="P version 1"))
        session.commit()
        session.close()
        p = session.query(P).first()
        session.delete(p)
        session.add(P(id="P1", data="really a row-switch"))
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            session.commit()
    def test_child_row_switch(self):
        """Replacing the single child object row-switches the child row."""
        P, C = self.classes.P, self.classes.C
        assert P.c.property.strategy.use_get
        session = fixture_session()
        session.add(P(id="P1", data="P version 1"))
        session.commit()
        session.close()
        p = session.query(P).first()
        p.c = C(data="child version 1")
        session.commit()
        p = session.query(P).first()
        p.c = C(data="child row-switch")
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            session.commit()
class AlternateGeneratorTest(fixtures.MappedTest):
    """Row-switch and concurrency tests using a custom (uuid-string)
    version_id_generator rather than the default integer counter."""
    __backend__ = True
    __requires__ = ("sane_rowcount",)
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "p",
            metadata,
            Column("id", String(10), primary_key=True),
            # string version column to hold uuid hex values
            Column("version_id", String(32), nullable=False),
            Column("data", String(50)),
        )
        Table(
            "c",
            metadata,
            Column("id", String(10), ForeignKey("p.id"), primary_key=True),
            Column("version_id", String(32), nullable=False),
            Column("data", String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class P(cls.Basic):
            pass
        class C(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        p, c, C, P = cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P
        # both mappers generate a fresh uuid per version
        cls.mapper_registry.map_imperatively(
            P,
            p,
            version_id_col=p.c.version_id,
            version_id_generator=lambda x: make_uuid(),
            properties={
                "c": relationship(
                    C, uselist=False, cascade="all, delete-orphan"
                )
            },
        )
        cls.mapper_registry.map_imperatively(
            C,
            c,
            version_id_col=c.c.version_id,
            version_id_generator=lambda x: make_uuid(),
        )
    def test_row_switch(self):
        """delete + add same-PK row flushes as a versioned UPDATE."""
        P = self.classes.P
        session = fixture_session()
        session.add(P(id="P1", data="P version 1"))
        session.commit()
        session.close()
        p = session.query(P).first()
        session.delete(p)
        session.add(P(id="P1", data="really a row-switch"))
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            session.commit()
    def test_child_row_switch_one(self):
        """Replacing the single child object row-switches the child row."""
        P, C = self.classes.P, self.classes.C
        assert P.c.property.strategy.use_get
        session = fixture_session()
        session.add(P(id="P1", data="P version 1"))
        session.commit()
        session.close()
        p = session.query(P).first()
        p.c = C(data="child version 1")
        session.commit()
        p = session.query(P).first()
        p.c = C(data="child row-switch")
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            session.commit()
    @testing.requires.sane_rowcount_w_returning
    def test_child_row_switch_two(self):
        """Concurrent delete/re-add in another session makes this
        session's UPDATE stale."""
        P = self.classes.P
        # TODO: not sure this test is
        # testing exactly what its looking for
        sess1 = fixture_session()
        sess1.add(P(id="P1", data="P version 1"))
        sess1.commit()
        sess1.close()
        p1 = sess1.query(P).first()
        sess2 = fixture_session()
        p2 = sess2.query(P).first()
        sess1.delete(p1)
        sess1.commit()
        # this can be removed and it still passes
        sess1.add(P(id="P1", data="P version 2"))
        sess1.commit()
        p2.data = "P overwritten by concurrent tx"
        if testing.db.dialect.supports_sane_rowcount:
            assert_raises_message(
                orm.exc.StaleDataError,
                r"UPDATE statement on table 'p' expected to update "
                r"1 row\(s\); 0 were matched.",
                sess2.commit,
            )
        else:
            sess2.commit()
class PlainInheritanceTest(fixtures.MappedTest):
    """Joined-table inheritance where only the base table carries the
    version column; an update touching only the sub table must still
    bump the base version."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "base",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer, nullable=True),
            Column("data", String(50)),
        )
        Table(
            "sub",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("sub_data", String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class Base(cls.Basic):
            pass
        class Sub(Base):
            pass
    def test_update_child_table_only(self):
        """Changing only sub.sub_data bumps base.version_id to 2."""
        Base, sub, base, Sub = (
            self.classes.Base,
            self.tables.sub,
            self.tables.base,
            self.classes.Sub,
        )
        self.mapper_registry.map_imperatively(
            Base, base, version_id_col=base.c.version_id
        )
        self.mapper_registry.map_imperatively(Sub, sub, inherits=Base)
        s = fixture_session()
        s1 = Sub(data="b", sub_data="s")
        s.add(s1)
        s.commit()
        s1.sub_data = "s2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s.commit()
        eq_(s1.version_id, 2)
class InheritanceTwoVersionIdsTest(fixtures.MappedTest):
    """Test versioning where both parent/child table have a
    versioning column.
    """
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "base",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer, nullable=True),
            Column("data", String(50)),
        )
        Table(
            "sub",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("version_id", Integer, nullable=False),
            Column("sub_data", String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class Base(cls.Basic):
            pass
        class Sub(Base):
            pass
    def test_base_both(self):
        """Version col on the base mapper only: base row is populated."""
        Base, sub, base, Sub = (
            self.classes.Base,
            self.tables.sub,
            self.tables.base,
            self.classes.Sub,
        )
        self.mapper_registry.map_imperatively(
            Base, base, version_id_col=base.c.version_id
        )
        self.mapper_registry.map_imperatively(Sub, sub, inherits=Base)
        session = fixture_session()
        b1 = Base(data="b1")
        session.add(b1)
        session.commit()
        eq_(b1.version_id, 1)
        # base is populated
        eq_(session.connection().scalar(select(base.c.version_id)), 1)
    def test_sub_both(self):
        """Inserting a Sub populates the version column in both tables."""
        Base, sub, base, Sub = (
            self.classes.Base,
            self.tables.sub,
            self.tables.base,
            self.classes.Sub,
        )
        self.mapper_registry.map_imperatively(
            Base, base, version_id_col=base.c.version_id
        )
        self.mapper_registry.map_imperatively(Sub, sub, inherits=Base)
        session = fixture_session()
        s1 = Sub(data="s1", sub_data="s1")
        session.add(s1)
        session.commit()
        # table is populated
        eq_(session.connection().scalar(select(sub.c.version_id)), 1)
        # base is populated
        eq_(session.connection().scalar(select(base.c.version_id)), 1)
    def test_sub_only(self):
        """Version col declared on the sub mapper only populates sub."""
        Base, sub, base, Sub = (
            self.classes.Base,
            self.tables.sub,
            self.tables.base,
            self.classes.Sub,
        )
        self.mapper_registry.map_imperatively(Base, base)
        self.mapper_registry.map_imperatively(
            Sub, sub, inherits=Base, version_id_col=sub.c.version_id
        )
        session = fixture_session()
        s1 = Sub(data="s1", sub_data="s1")
        session.add(s1)
        session.commit()
        # table is populated
        eq_(session.connection().scalar(select(sub.c.version_id)), 1)
        # base is not
        eq_(session.connection().scalar(select(base.c.version_id)), None)
    def test_mismatch_version_col_warning(self):
        """Declaring a second version_id_col on the sub mapper warns."""
        Base, sub, base, Sub = (
            self.classes.Base,
            self.tables.sub,
            self.tables.base,
            self.classes.Sub,
        )
        self.mapper_registry.map_imperatively(
            Base, base, version_id_col=base.c.version_id
        )
        assert_raises_message(
            exc.SAWarning,
            "Inheriting version_id_col 'version_id' does not "
            "match inherited version_id_col 'version_id' and will not "
            "automatically populate the inherited versioning column. "
            "version_id_col should only be specified on "
            "the base-most mapper that includes versioning.",
            self.mapper_registry.map_imperatively,
            Sub,
            sub,
            inherits=Base,
            version_id_col=sub.c.version_id,
        )
class ServerVersioningTest(fixtures.MappedTest):
run_define_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
from sqlalchemy.sql import ColumnElement
from sqlalchemy.ext.compiler import compiles
import itertools
counter = itertools.count(1)
class IncDefault(ColumnElement):
pass
@compiles(IncDefault)
def compile_(element, compiler, **kw):
# cache the counter value on the statement
# itself so the assertsql system gets the same
# value when it compiles the statement a second time
stmt = compiler.statement
if hasattr(stmt, "_counter"):
return stmt._counter
else:
stmt._counter = str(next(counter))
return stmt._counter
Table(
"version_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"version_id",
Integer,
nullable=False,
default=IncDefault(),
onupdate=IncDefault(),
),
Column("value", String(40), nullable=False),
)
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(cls.Basic):
pass
def _fixture(self, expire_on_commit=True, eager_defaults=False):
Foo, version_table = self.classes.Foo, self.tables.version_table
self.mapper_registry.map_imperatively(
Foo,
version_table,
version_id_col=version_table.c.version_id,
version_id_generator=False,
eager_defaults=eager_defaults,
)
s1 = fixture_session(expire_on_commit=expire_on_commit)
return s1
def test_insert_col(self):
self._test_insert_col()
def test_insert_col_eager_defaults(self):
self._test_insert_col(eager_defaults=True)
def _test_insert_col(self, **kw):
sess = self._fixture(**kw)
f1 = self.classes.Foo(value="f1")
sess.add(f1)
statements = [
# note that the assertsql tests the rule against
# "default" - on a "returning" backend, the statement
# includes "RETURNING"
CompiledSQL(
"INSERT INTO version_table (version_id, value) "
"VALUES (1, :value)",
lambda ctx: [{"value": "f1"}],
)
]
if not testing.db.dialect.implicit_returning:
# DBs without implicit returning, we must immediately
# SELECT for the new version id
statements.append(
CompiledSQL(
"SELECT version_table.version_id "
"AS version_table_version_id "
"FROM version_table WHERE version_table.id = :pk_1",
lambda ctx: [{"pk_1": 1}],
)
)
self.assert_sql_execution(testing.db, sess.flush, *statements)
def test_update_col(self):
self._test_update_col()
def test_update_col_eager_defaults(self):
self._test_update_col(eager_defaults=True)
def _test_update_col(self, **kw):
sess = self._fixture(**kw)
f1 = self.classes.Foo(value="f1")
sess.add(f1)
sess.flush()
f1.value = "f2"
statements = [
# note that the assertsql tests the rule against
# "default" - on a "returning" backend, the statement
# includes "RETURNING"
CompiledSQL(
"UPDATE version_table SET version_id=2, value=:value "
"WHERE version_table.id = :version_table_id AND "
"version_table.version_id = :version_table_version_id",
lambda ctx: [
{
"version_table_id": 1,
"version_table_version_id": 1,
"value": "f2",
}
],
)
]
if not testing.db.dialect.implicit_returning:
# DBs without implicit returning, we must immediately
# SELECT for the new version id
statements.append(
CompiledSQL(
"SELECT version_table.version_id "
"AS version_table_version_id "
"FROM version_table WHERE version_table.id = :pk_1",
lambda ctx: [{"pk_1": 1}],
)
)
with conditional_sane_rowcount_warnings(
update=True, only_returning=True
):
self.assert_sql_execution(testing.db, sess.flush, *statements)
@testing.requires.updateable_autoincrement_pks
def test_sql_expr_bump(self):
sess = self._fixture()
f1 = self.classes.Foo(value="f1")
sess.add(f1)
sess.flush()
eq_(f1.version_id, 1)
f1.id = self.classes.Foo.id + 0
with conditional_sane_rowcount_warnings(
update=True, only_returning=True
):
sess.flush()
eq_(f1.version_id, 2)
@testing.requires.updateable_autoincrement_pks
@testing.requires.returning
def test_sql_expr_w_mods_bump(self):
sess = self._fixture()
f1 = self.classes.Foo(id=2, value="f1")
sess.add(f1)
sess.flush()
eq_(f1.version_id, 1)
f1.id = self.classes.Foo.id + 3
with conditional_sane_rowcount_warnings(update=True):
sess.flush()
eq_(f1.id, 5)
eq_(f1.version_id, 2)
def test_multi_update(self):
sess = self._fixture()
f1 = self.classes.Foo(value="f1")
f2 = self.classes.Foo(value="f2")
f3 = self.classes.Foo(value="f3")
sess.add_all([f1, f2, f3])
sess.flush()
f1.value = "f1a"
f2.value = "f2a"
f3.value = "f3a"
statements = [
# note that the assertsql tests the rule against
# "default" - on a "returning" backend, the statement
# includes "RETURNING"
CompiledSQL(
"UPDATE version_table SET version_id=2, value=:value "
"WHERE version_table.id = :version_table_id AND "
"version_table.version_id = :version_table_version_id",
lambda ctx: [
{
"version_table_id": 1,
"version_table_version_id": 1,
"value": "f1a",
}
],
),
CompiledSQL(
"UPDATE version_table SET version_id=2, value=:value "
"WHERE version_table.id = :version_table_id AND "
"version_table.version_id = :version_table_version_id",
lambda ctx: [
{
"version_table_id": 2,
"version_table_version_id": 1,
"value": "f2a",
}
],
),
CompiledSQL(
"UPDATE version_table SET version_id=2, value=:value "
"WHERE version_table.id = :version_table_id AND "
"version_table.version_id = :version_table_version_id",
lambda ctx: [
{
"version_table_id": 3,
"version_table_version_id": 1,
"value": "f3a",
}
],
),
]
if not testing.db.dialect.implicit_returning:
# DBs without implicit returning, we must immediately
# SELECT for the new version id
statements.extend(
[
CompiledSQL(
"SELECT version_table.version_id "
"AS version_table_version_id "
"FROM version_table WHERE version_table.id = :pk_1",
lambda ctx: [{"pk_1": 1}],
),
CompiledSQL(
"SELECT version_table.version_id "
"AS version_table_version_id "
"FROM version_table WHERE version_table.id = :pk_1",
lambda ctx: [{"pk_1": 2}],
),
CompiledSQL(
"SELECT version_table.version_id "
"AS version_table_version_id "
"FROM version_table WHERE version_table.id = :pk_1",
lambda ctx: [{"pk_1": 3}],
),
]
)
with conditional_sane_rowcount_warnings(
update=True, only_returning=True
):
self.assert_sql_execution(testing.db, sess.flush, *statements)
def test_delete_col(self):
    """DELETE of a versioned row includes the current version_id in its
    WHERE clause.
    """
    sess = self._fixture()
    f1 = self.classes.Foo(value="f1")
    sess.add(f1)
    sess.flush()
    sess.delete(f1)
    statements = [
        # note that the assertsql tests the rule against
        # "default" - on a "returning" backend, the statement
        # includes "RETURNING"
        CompiledSQL(
            "DELETE FROM version_table "
            "WHERE version_table.id = :id AND "
            "version_table.version_id = :version_id",
            lambda ctx: [{"id": 1, "version_id": 1}],
        )
    ]
    with conditional_sane_rowcount_warnings(delete=True):
        self.assert_sql_execution(testing.db, sess.flush, *statements)
@testing.requires.sane_rowcount_w_returning
def test_concurrent_mod_err_expire_on_commit(self):
    """A commit that loses a version race raises StaleDataError."""
    sess = self._fixture()
    foo = self.classes.Foo(value="f1")
    sess.add(foo)
    sess.commit()
    # touch the attribute so the expired row is reloaded into this session
    foo.value
    other_sess = fixture_session()
    competing = other_sess.query(self.classes.Foo).first()
    competing.value = "f2"
    other_sess.commit()
    # our copy now carries a stale version_id; committing must fail
    foo.value = "f3"
    assert_raises_message(
        orm.exc.StaleDataError,
        r"UPDATE statement on table 'version_table' expected to "
        r"update 1 row\(s\); 0 were matched.",
        sess.commit,
    )
@testing.requires.sane_rowcount_w_returning
def test_concurrent_mod_err_noexpire_on_commit(self):
    """Stale-version detection still fires when expire_on_commit=False,
    i.e. when no post-commit refresh ever reloads the version_id.
    """
    sess = self._fixture(expire_on_commit=False)
    f1 = self.classes.Foo(value="f1")
    sess.add(f1)
    sess.commit()
    # here, we're not expired overall, so no load occurs and we
    # stay without a version id, unless we've emitted
    # a SELECT for it within the flush.
    f1.value
    s2 = fixture_session(expire_on_commit=False)
    f2 = s2.query(self.classes.Foo).first()
    f2.value = "f2"
    s2.commit()
    # this session's version_id is now stale; the commit must fail
    f1.value = "f3"
    assert_raises_message(
        orm.exc.StaleDataError,
        r"UPDATE statement on table 'version_table' expected to "
        r"update 1 row\(s\); 0 were matched.",
        sess.commit,
    )
class ManualVersionTest(fixtures.MappedTest):
    """Versioning with version_id_generator=False: the application assigns
    values to the ``vid`` column itself; the ORM only embeds the currently
    known value into UPDATE WHERE criteria.
    """

    run_define_tables = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # "vid" is the manually-managed version column
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
            Column("vid", Integer),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        # version_id_generator=False: the ORM never increments "vid" itself
        cls.mapper_registry.map_imperatively(
            cls.classes.A,
            cls.tables.a,
            version_id_col=cls.tables.a.c.vid,
            version_id_generator=False,
        )

    def test_insert(self):
        """An INSERT persists the manually assigned version id."""
        sess = fixture_session()
        a1 = self.classes.A()
        a1.vid = 1
        sess.add(a1)
        sess.commit()
        eq_(a1.vid, 1)

    def test_update(self):
        """An UPDATE with a manually bumped version id round-trips."""
        sess = fixture_session()
        a1 = self.classes.A()
        a1.vid = 1
        a1.data = "d1"
        sess.add(a1)
        sess.commit()
        a1.vid = 2
        a1.data = "d2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            sess.commit()
        eq_(a1.vid, 2)

    @testing.requires.sane_rowcount_w_returning
    def test_update_concurrent_check(self):
        """An out-of-band UPDATE of the version column makes the ORM's
        guarded UPDATE match zero rows, raising StaleDataError.
        """
        sess = fixture_session()
        a1 = self.classes.A()
        a1.vid = 1
        a1.data = "d1"
        sess.add(a1)
        sess.commit()
        a1.vid = 2
        # simulate a competing writer changing the version out of band
        sess.execute(self.tables.a.update().values(vid=3))
        a1.data = "d2"
        assert_raises(orm_exc.StaleDataError, sess.commit)

    def test_update_version_conditional(self):
        """The version id may be left unchanged for some UPDATEs and bumped
        for others, entirely at the application's discretion.
        """
        sess = fixture_session()
        a1 = self.classes.A()
        a1.vid = 1
        a1.data = "d1"
        sess.add(a1)
        sess.commit()
        # change the data and UPDATE without
        # incrementing version id
        a1.data = "d2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            sess.commit()
        eq_(a1.vid, 1)
        a1.data = "d3"
        a1.vid = 2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            sess.commit()
        eq_(a1.vid, 2)
class ManualInheritanceVersionTest(fixtures.MappedTest):
    """Manual versioning on a joined-table inheritance hierarchy where the
    version column lives on the base table only.
    """

    run_define_tables = "each"
    __backend__ = True
    __requires__ = ("sane_rowcount",)

    @classmethod
    def define_tables(cls, metadata):
        # base table carries the manually-managed "vid" version column
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
            Column("vid", Integer, nullable=False),
        )
        # subclass table has no version column of its own
        Table(
            "b",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("b_data", String(30)),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(A):
            pass

    @classmethod
    def setup_mappers(cls):
        cls.mapper_registry.map_imperatively(
            cls.classes.A,
            cls.tables.a,
            version_id_col=cls.tables.a.c.vid,
            version_id_generator=False,
        )
        cls.mapper_registry.map_imperatively(
            cls.classes.B, cls.tables.b, inherits=cls.classes.A
        )

    def test_no_increment(self):
        """Changing only subclass-table columns does not require a version
        bump; a manual bump still works afterwards.
        """
        sess = fixture_session()
        b1 = self.classes.B()
        b1.vid = 1
        b1.data = "d1"
        sess.add(b1)
        sess.commit()
        # change col on subtable only without
        # incrementing version id
        b1.b_data = "bd2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            sess.commit()
        eq_(b1.vid, 1)
        b1.b_data = "d3"
        b1.vid = 2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            sess.commit()
        eq_(b1.vid, 2)
class VersioningMappedSelectTest(fixtures.MappedTest):
    # test for #4193, see also #4194 for related notes
    """Versioning when the mapper targets a SELECT/alias of the versioned
    table rather than the table itself.
    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "version_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("version_id", Integer, nullable=False),
            Column("value", String(40), nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass

    def _implicit_version_fixture(self):
        """Map Foo to an aliased SELECT with ORM-managed version counting."""
        Foo, version_table = self.classes.Foo, self.tables.version_table
        current = (
            version_table.select()
            .where(version_table.c.id > 0)
            .alias("current_table")
        )
        self.mapper_registry.map_imperatively(
            Foo, current, version_id_col=version_table.c.version_id
        )
        s1 = fixture_session()
        return s1

    def _explicit_version_fixture(self):
        """Map Foo to an aliased SELECT with application-managed versions."""
        Foo, version_table = self.classes.Foo, self.tables.version_table
        current = (
            version_table.select()
            .where(version_table.c.id > 0)
            .alias("current_table")
        )
        self.mapper_registry.map_imperatively(
            Foo,
            current,
            version_id_col=version_table.c.version_id,
            version_id_generator=False,
        )
        s1 = fixture_session()
        return s1

    def test_implicit(self):
        """ORM-generated version ids increment through the mapped SELECT."""
        Foo = self.classes.Foo
        s1 = self._implicit_version_fixture()
        f1 = Foo(value="f1")
        f2 = Foo(value="f2")
        s1.add_all((f1, f2))
        s1.commit()
        f1.value = "f1rev2"
        f2.value = "f2rev2"
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.commit()
        eq_(
            s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
            [(f1.id, "f1rev2", 2), (f2.id, "f2rev2", 2)],
        )

    def test_explicit(self):
        """Manually assigned version ids round-trip through the mapped
        SELECT.
        """
        Foo = self.classes.Foo
        s1 = self._explicit_version_fixture()
        f1 = Foo(value="f1", version_id=1)
        f2 = Foo(value="f2", version_id=1)
        s1.add_all((f1, f2))
        s1.flush()
        # note this requires that the Session was not expired until
        # we fix #4195
        f1.value = "f1rev2"
        f1.version_id = 2
        f2.value = "f2rev2"
        f2.version_id = 2
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            s1.flush()
        eq_(
            s1.query(Foo.id, Foo.value, Foo.version_id).order_by(Foo.id).all(),
            [(f1.id, "f1rev2", 2), (f2.id, "f2rev2", 2)],
        )

    def test_implicit_no_readonly(self):
        # test issue 4194
        """The version column mapped from the SELECT must not become a
        read-only property; reading it emits no extra SQL.
        """
        Foo = self.classes.Foo
        s1 = self._implicit_version_fixture()
        f1 = Foo(value="f1")
        s1.add(f1)
        s1.flush()
        is_false(bool(inspect(Foo)._readonly_props))

        def go():
            eq_(f1.version_id, 1)

        # zero SELECTs: the version id is already present in memory
        self.assert_sql_count(testing.db, go, 0)

    def test_explicit_assign_from_expired(self):
        # test issue 4195
        """Assigning a new version id on a fully expired instance works,
        relying on active_history for the version attribute.
        """
        Foo = self.classes.Foo
        s1 = self._explicit_version_fixture()
        configure_mappers()
        is_true(Foo.version_id.impl.active_history)
        f1 = Foo(value="f1", version_id=1)
        s1.add(f1)
        s1.flush()
        s1.expire_all()
        with conditional_sane_rowcount_warnings(
            update=True, only_returning=True
        ):
            f1.value = "f2"
            f1.version_id = 2
            s1.flush()
| |
# Copyright 2014 Treode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import *
from client_cache import *
import urllib3
from tx_clock import *
class TestClientCache(object):
    """Hand-rolled test driver for ClientCache.

    NOTE(review): Python 2 only — uses print-statement syntax and ``long``.
    Each ``ClientCache_*`` method is a test; ``test_all`` runs them in order
    against the HTTP server named in the constructor.
    """

    def __init__(self, server, port=80):
        # server: hostname the ClientCache under test will contact
        # port: stored but not read by any test below
        self.server = server
        self.port = port
        # ordered list of test callables executed by test_all()
        self.tests = [
            self.ClientCache_Init_Succeeds,
            self.ClientCache_CreatingServerConnection_Succeeds,
            self.ClientCache_ReadingFromCache_Succeeds,
            self.ClientCache_Write]

    # Test that we can create a ClientCache instance successfully
    def ClientCache_Init_Succeeds(self):
        print "test_client_cache_init",
        server = self.server
        max_age = 10
        # Default headers
        cache_default = ClientCache(server)
        assert(cache_default != None)
        # max_age header
        cache_max_age = ClientCache(server,
            max_age=max_age)
        assert(cache_max_age != None)
        # no_cache header
        cache_no_cache = ClientCache(server, no_cache=True)
        assert(cache_no_cache != None)
        # max_age and no_cache headers
        cache_both = ClientCache(server,
            max_age=max_age, no_cache=True)
        assert(cache_both != None)
        print "PASSED!"

    # Test that the ClientCache has a valid connection to the server
    def ClientCache_CreatingServerConnection_Succeeds(self):
        print "test_client_cache_connection",
        server = self.server
        max_age = 10
        cache = ClientCache(server,
            max_age=max_age)
        # issues a real GET against the live server's root path
        result = cache.http_facade.pool.request('GET', '/')
        assert(result != None)
        print "PASSED!"

    # Test that calling read generates the expected request with headers
    def ClientCache_ReadingFromCache_Succeeds(self):
        print "test_client_cache_read",
        server = self.server
        cache = ClientCache(server)
        # Mock the request method
        headers = {
            "Date": "Wed, 14 Jan 2015 11:49:13 GMT",
            "Last-Modified": "Fri, 10 May 2013 02:07:43 GMT",
            "Read-TxClock": "10",
            "Value-TxClock": "5",
            "Vary": "Request-TxClock"
        }
        body = """{ "title": "Fruits",
            "types": [
            { "fruit": "apple", "flavor": "sour" },
            { "fruit": "banana", "flavor": "mushy" } ] }"""
        cached_time = TxClock(micro_seconds=long(headers["Read-TxClock"]))
        value_time = TxClock(micro_seconds=long(headers["Value-TxClock"]))
        # NOTE(review): ``json`` is not imported in this module's visible
        # import block — presumably provided by one of the star imports
        # above; confirm.
        json_value = json.loads(body)
        # replace the HTTP layer so read() never touches the network
        cache.http_facade = Mock()
        result = (cached_time, value_time, json_value)
        cache.http_facade.read = Mock(return_value=result)
        self.ClientCache_ReadingWithoutMaxAgeNoCacheParams_Succeeds(cache)
        self.ClientCache_ReadingWithMaxAgeNoCacheParams_Succeeds(cache)
        print "PASSED!"

    def ClientCache_ReadingWithoutMaxAgeNoCacheParams_Succeeds(self, cache):
        # NOTE(review): 10**10*6 differs from the 10*10**6 used by the
        # sibling test below — possibly a typo; confirm intended read_time.
        read_time = TxClock(10**10*6)
        table = "table1"
        key = "key1"
        cache_result = cache.read(read_time, table, key)
        json_result = cache_result.value
        assert(json_result["title"] == "Fruits")
        assert(json_result["types"] == [
            { "fruit": "apple", "flavor": "sour" },
            { "fruit": "banana", "flavor": "mushy" } ])

    def ClientCache_ReadingWithMaxAgeNoCacheParams_Succeeds(self, cache):
        read_time = TxClock(10*10**6)
        # With cache and max age
        table = "table1"
        key = "key2"
        max_age = 8
        no_cache = True
        condition_time = TxClock(10*10**6)
        cache_result = cache.read(read_time, table, key,
            max_age=max_age, no_cache=no_cache)
        json_result = cache_result.value
        assert(json_result["title"] == "Fruits")
        assert(json_result["types"] == [
            { "fruit": "apple", "flavor": "sour" },
            { "fruit": "banana", "flavor": "mushy" } ])
        # Without cache but max age
        table = "table2"
        key = "key42"
        cache_result = cache.read(read_time, table, key,
            max_age=max_age)
        json_result = cache_result.value
        assert(json_result["title"] == "Fruits")
        assert(json_result["types"] == [
            { "fruit": "apple", "flavor": "sour" },
            { "fruit": "banana", "flavor": "mushy" } ])

    def ClientCache_Write(self):
        print "test_client_cache_write ",
        server = self.server
        max_age = 10
        cache = ClientCache(server,
            max_age=max_age)
        self.ClientCache_Write_Succeeds(cache)
        self.ClientCache_Write_Fails(cache)
        print "PASSED!"

    def ClientCache_Write_Succeeds(self, cache):
        # Mock the urlopen method
        status = 200
        response = urllib3.response.HTTPResponse(status=status)
        cache.http_facade = Mock()
        cache.http_facade.write = Mock(return_value=response)
        condition_time = TxClock(5*10**6)
        # one operation of each kind the write API supports
        ops_dict = {
            ("table1", "key1"): ("create", 42),
            ("table2", "key2"): ("hold", None),
            ("table3", "key3"): ("update", 54),
            ("table4", "key4"): ("delete", 79)
        }
        try:
            cache.write(condition_time, ops_dict)
        except:
            # Expected, since we are not running a real DB in this test
            pass

    def ClientCache_Write_Fails(self, cache):
        # Mock the request method
        # 412 Precondition Failed signals a stale condition_time
        status = 412
        response = urllib3.response.HTTPResponse(status=status,
            headers={"Value-TxClock": "992"})
        cache.http_facade = Mock()
        cache.http_facade.write = Mock(return_value=response)
        condition_time = TxClock(5*10**6)
        ops_dict = {
            ("table1", "key1"): ("update", 112)
        }
        try:
            cache.write(condition_time, ops_dict)
            # The write should fail
            assert(False)
        except StaleException as exn:
            # You failed. Good job!
            (read_time, value_time) = exn.toTuple()
            assert(str(value_time) == "992")

    def test_all(self):
        # run every registered test in declaration order
        for test in self.tests:
            test()
def main():
    """Run the full TestClientCache suite against a live HTTP server."""
    test_instance = TestClientCache("www.bbc.com")
    test_instance.test_all()


if __name__ == "__main__":
    # Bug fix: previously the suite ran unconditionally at import time while
    # the __main__ guard body was just ``pass``. Run the (network-touching)
    # tests only when this module is executed as a script.
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_thresholds and
to prevent columns from matching to rows (generally resulting in a negative
training example) and unmatched_theshold to ignore the match (generally
resulting in neither a positive or negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow as tf
from object_detection.core import matcher
from object_detection.utils import shape_utils
class ArgMaxMatcher(matcher.Matcher):
  """Matcher based on highest value.

  This class computes matches from a similarity matrix. Each column is matched
  to a single row.

  To support object detection target assignment this class enables setting both
  matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
  defining three categories of similarity which define whether examples are
  positive, negative, or ignored:
  (1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
  (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
      Depending on negatives_lower_than_unmatched, this is either
      Unmatched/Negative OR Ignore.
  (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
      negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
  For ignored matches this class sets the values in the Match object to -2.
  """

  def __init__(self,
               matched_threshold,
               unmatched_threshold=None,
               negatives_lower_than_unmatched=True,
               force_match_for_each_row=False,
               use_matmul_gather=False):
    """Construct ArgMaxMatcher.

    Args:
      matched_threshold: Threshold for positive matches. Positive if
        sim >= matched_threshold, where sim is the maximum value of the
        similarity matrix for a given column. Set to None for no threshold.
      unmatched_threshold: Threshold for negative matches. Negative if
        sim < unmatched_threshold. Defaults to matched_threshold
        when set to None.
      negatives_lower_than_unmatched: Boolean which defaults to True. If True
        then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and umatched
        threshold. If False, then negative matches are in between the matched
        and unmatched threshold, and everything lower than unmatched is ignored.
      force_match_for_each_row: If True, ensures that each row is matched to
        at least one column (which is not guaranteed otherwise if the
        matched_threshold is high). Defaults to False. See
        argmax_matcher_test.testMatcherForceMatch() for an example.
      use_matmul_gather: Force constructed match objects to use matrix
        multiplication based gather instead of standard tf.gather.
        (Default: False).

    Raises:
      ValueError: if unmatched_threshold is set but matched_threshold is not set
        or if unmatched_threshold > matched_threshold.
    """
    super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather)
    # an unmatched_threshold alone is meaningless: it only subdivides the
    # region below matched_threshold
    if (matched_threshold is None) and (unmatched_threshold is not None):
      # NOTE(review): the concatenated message lacks a space between
      # "when" and "unmatched_threshold".
      raise ValueError('Need to also define matched_threshold when'
                       'unmatched_threshold is defined')
    self._matched_threshold = matched_threshold
    if unmatched_threshold is None:
      # collapse the "ignore" band: anything below matched is unmatched
      self._unmatched_threshold = matched_threshold
    else:
      if unmatched_threshold > matched_threshold:
        # NOTE(review): this message also lacks a space ("equal""to").
        raise ValueError('unmatched_threshold needs to be smaller or equal'
                         'to matched_threshold')
      self._unmatched_threshold = unmatched_threshold
    if not negatives_lower_than_unmatched:
      # negatives live strictly between the thresholds in this mode, so an
      # empty band would make negatives impossible
      if self._unmatched_threshold == self._matched_threshold:
        raise ValueError('When negatives are in between matched and '
                         'unmatched thresholds, these cannot be of equal '
                         'value. matched: {}, unmatched: {}'.format(
                             self._matched_threshold,
                             self._unmatched_threshold))
    self._force_match_for_each_row = force_match_for_each_row
    self._negatives_lower_than_unmatched = negatives_lower_than_unmatched

  def _match(self, similarity_matrix, valid_rows):
    """Tries to match each column of the similarity matrix to a row.

    Args:
      similarity_matrix: tensor of shape [N, M] representing any similarity
        metric.
      valid_rows: a boolean tensor of shape [N] indicating valid rows.

    Returns:
      Match object with corresponding matches for each of M columns.
    """

    def _match_when_rows_are_empty():
      """Performs matching when the rows of similarity matrix are empty.

      When the rows are empty, all detections are false positives. So we return
      a tensor of -1's to indicate that the columns do not match to any rows.

      Returns:
        matches: int32 tensor indicating the row each column matches to.
      """
      similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
          similarity_matrix)
      return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)

    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches: int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        # -1 marks unmatched/negative, -2 marks ignored; which band gets
        # which label is controlled by negatives_lower_than_unmatched
        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
            similarity_matrix)
        # best column for each (valid) row; invalid rows are zeroed out so
        # they never force a match
        force_match_column_ids = tf.argmax(similarity_matrix, 1,
                                           output_type=tf.int32)
        force_match_column_indicators = (
            tf.one_hot(
                force_match_column_ids, depth=similarity_matrix_shape[1]) *
            tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32))
        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
                                        output_type=tf.int32)
        force_match_column_mask = tf.cast(
            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
        # forced assignments override the threshold-based matches
        final_matches = tf.where(force_match_column_mask,
                                 force_match_row_ids, matches)
        return final_matches
      else:
        return matches

    if similarity_matrix.shape.is_fully_defined():
      # static shape known: pick the branch at graph-construction time
      # NOTE(review): shape[0].value is TF1-style Dimension access.
      if similarity_matrix.shape[0].value == 0:
        return _match_when_rows_are_empty()
      else:
        return _match_when_rows_are_non_empty()
    else:
      # dynamic shape: decide at runtime via tf.cond
      return tf.cond(
          tf.greater(tf.shape(similarity_matrix)[0], 0),
          _match_when_rows_are_non_empty, _match_when_rows_are_empty)

  def _set_values_using_indicator(self, x, indicator, val):
    """Set the indicated fields of x to val.

    Args:
      x: tensor.
      indicator: boolean with same shape as x.
      val: scalar with value to set.

    Returns:
      modified tensor.
    """
    indicator = tf.cast(indicator, x.dtype)
    return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
| |
from pandas.compat import range
import re
import operator
import warnings
from numpy import nan
import numpy as np
from pandas import _np_version_under1p8
from pandas.sparse.api import SparseArray
from pandas._sparse import IntIndex
from pandas.util.testing import assert_almost_equal, assertRaisesRegexp
import pandas.util.testing as tm
class TestSparseArray(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Build the dense fixture plus NaN-fill and zero-fill sparse arrays."""
    dense = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
    self.arr_data = dense
    self.arr = SparseArray(dense)
    self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
    """dtype inference and fill_value defaults across constructor forms."""
    # float data with NaN: float64 dtype, NaN fill by default
    arr = SparseArray([np.nan, 1, 2, np.nan])
    self.assertEqual(arr.dtype, np.float64)
    self.assertTrue(np.isnan(arr.fill_value))
    # an explicit fill_value overrides the NaN default
    arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
    self.assertEqual(arr.dtype, np.float64)
    self.assertEqual(arr.fill_value, 0)
    # explicit int dtype still defaults to a NaN fill
    arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
    self.assertEqual(arr.dtype, np.int64)
    self.assertTrue(np.isnan(arr.fill_value))
    arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
    self.assertEqual(arr.dtype, np.int64)
    self.assertEqual(arr.fill_value, 0)
    # dtype=None infers int64 from the data
    arr = SparseArray([0, 1, 2, 4], dtype=None)
    self.assertEqual(arr.dtype, np.int64)
    self.assertTrue(np.isnan(arr.fill_value))
    arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
    self.assertEqual(arr.dtype, np.int64)
    self.assertEqual(arr.fill_value, 0)
def test_constructor_spindex_dtype(self):
    """Constructing from explicit data + IntIndex matches the equivalent
    dense construction for each dtype/fill_value combination."""
    # sparse_index places the two values at positions 1 and 2 of length 4
    arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
    tm.assert_sp_array_equal(arr, SparseArray([np.nan, 1, 2, np.nan]))
    self.assertEqual(arr.dtype, np.float64)
    self.assertTrue(np.isnan(arr.fill_value))
    arr = SparseArray(data=[0, 1, 2, 3],
                      sparse_index=IntIndex(4, [0, 1, 2, 3]),
                      dtype=np.int64)
    exp = SparseArray([0, 1, 2, 3], dtype=np.int64)
    tm.assert_sp_array_equal(arr, exp)
    self.assertEqual(arr.dtype, np.int64)
    self.assertTrue(np.isnan(arr.fill_value))
    # zero fill: unlisted positions become 0 rather than NaN
    arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
                      fill_value=0, dtype=np.int64)
    exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
    tm.assert_sp_array_equal(arr, exp)
    self.assertEqual(arr.dtype, np.int64)
    self.assertEqual(arr.fill_value, 0)
    arr = SparseArray(data=[0, 1, 2, 3],
                      sparse_index=IntIndex(4, [0, 1, 2, 3]),
                      dtype=None)
    exp = SparseArray([0, 1, 2, 3], dtype=None)
    tm.assert_sp_array_equal(arr, exp)
    self.assertEqual(arr.dtype, np.int64)
    self.assertTrue(np.isnan(arr.fill_value))
    # scalar input
    arr = SparseArray(data=1,
                      sparse_index=IntIndex(1, [0]),
                      dtype=None)
    exp = SparseArray([1], dtype=None)
    tm.assert_sp_array_equal(arr, exp)
    self.assertEqual(arr.dtype, np.int64)
    self.assertTrue(np.isnan(arr.fill_value))
    arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
                      fill_value=0, dtype=None)
    exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
    tm.assert_sp_array_equal(arr, exp)
    self.assertEqual(arr.dtype, np.int64)
    self.assertEqual(arr.fill_value, 0)
def test_get_item(self):
    """Scalar indexing matches dense semantics for NaN- and zero-fill
    arrays, including negative indices and out-of-bounds errors."""
    self.assertTrue(np.isnan(self.arr[1]))
    for pos, expected in ((2, 1), (7, 5)):
        self.assertEqual(self.arr[pos], expected)
    for pos, expected in ((0, 0), (2, 1), (7, 5)):
        self.assertEqual(self.zarr[pos], expected)
    out_of_bounds = re.compile("bounds")
    assertRaisesRegexp(IndexError, out_of_bounds, lambda: self.arr[11])
    assertRaisesRegexp(IndexError, out_of_bounds, lambda: self.arr[-11])
    self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1])
def test_take(self):
    """Scalar and list-based take mirror np.take on the dense data."""
    self.assertTrue(np.isnan(self.arr.take(0)))
    # a scalar index returns a scalar, not a length-1 array
    self.assertTrue(np.isscalar(self.arr.take(2)))
    # np.take in < 1.8 doesn't support scalar indexing
    if not _np_version_under1p8:
        self.assertEqual(self.arr.take(2), np.take(self.arr_data, 2))
        self.assertEqual(self.arr.take(6), np.take(self.arr_data, 6))
    exp = SparseArray(np.take(self.arr_data, [2, 3]))
    tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
    exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
    tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
    """take on a zero-fill array preserves the source's fill_value."""
    base = np.array([1, np.nan, 0, 3, 0])
    zero_fill = SparseArray(base, fill_value=0)
    tm.assert_sp_array_equal(
        zero_fill.take([0]),
        SparseArray(np.take(base, [0]), fill_value=0))
    tm.assert_sp_array_equal(
        zero_fill.take([1, 3, 4]),
        SparseArray(np.take(base, [1, 3, 4]), fill_value=0))
def test_take_negative(self):
    """Negative take indices count from the end, matching np.take."""
    for indices in ([-1], [-4, -3, -2]):
        expected = SparseArray(np.take(self.arr_data, indices))
        tm.assert_sp_array_equal(self.arr.take(indices), expected)
def test_bad_take(self):
    """take past either end of the array raises IndexError."""
    take_too_big = lambda: self.arr.take(11)
    take_too_negative = lambda: self.arr.take(-11)
    assertRaisesRegexp(IndexError, "bounds", take_too_big)
    self.assertRaises(IndexError, take_too_negative)
def test_take_invalid_kwargs(self):
    """numpy-compat keyword arguments of take() are rejected.

    Fix: the first message is matched as a regular expression, so its
    escaped parentheses must live in a raw string; the original non-raw
    literal relied on the invalid escape sequence backslash-parenthesis
    (a DeprecationWarning on modern Python, slated to become an error).
    """
    msg = r"take\(\) got an unexpected keyword argument 'foo'"
    tm.assertRaisesRegexp(TypeError, msg, self.arr.take,
                          [2, 3], foo=2)
    msg = "the 'out' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, self.arr.take,
                          [2, 3], out=self.arr)
    msg = "the 'mode' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, self.arr.take,
                          [2, 3], mode='clip')
def test_take_filling(self):
    """-1 in take indices means "last element" by default but means
    "insert fill_value" when fill_value filling is enabled."""
    # similar tests as GH 12631
    sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
    result = sparse.take(np.array([1, 0, -1]))
    expected = SparseArray([np.nan, np.nan, 4])
    tm.assert_sp_array_equal(result, expected)
    # fill_value
    result = sparse.take(np.array([1, 0, -1]), fill_value=True)
    expected = SparseArray([np.nan, np.nan, np.nan])
    tm.assert_sp_array_equal(result, expected)
    # allow_fill=False
    result = sparse.take(np.array([1, 0, -1]),
                         allow_fill=False, fill_value=True)
    expected = SparseArray([np.nan, np.nan, 4])
    tm.assert_sp_array_equal(result, expected)
    # with filling enabled, indices below -1 are invalid
    msg = ('When allow_fill=True and fill_value is not None, '
           'all indices must be >= -1')
    with tm.assertRaisesRegexp(ValueError, msg):
        sparse.take(np.array([1, 0, -2]), fill_value=True)
    with tm.assertRaisesRegexp(ValueError, msg):
        sparse.take(np.array([1, 0, -5]), fill_value=True)
    # out-of-bounds indices raise regardless of filling mode
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, -6]))
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, 5]))
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_fill_value(self):
    """Same take-with-filling contract, on a zero-fill array: filled
    positions become NaN while the array keeps fill_value=0."""
    # same tests as GH 12631
    sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
    result = sparse.take(np.array([1, 0, -1]))
    expected = SparseArray([0, np.nan, 4], fill_value=0)
    tm.assert_sp_array_equal(result, expected)
    # fill_value
    result = sparse.take(np.array([1, 0, -1]), fill_value=True)
    expected = SparseArray([0, np.nan, 0], fill_value=0)
    tm.assert_sp_array_equal(result, expected)
    # allow_fill=False
    result = sparse.take(np.array([1, 0, -1]),
                         allow_fill=False, fill_value=True)
    expected = SparseArray([0, np.nan, 4], fill_value=0)
    tm.assert_sp_array_equal(result, expected)
    # with filling enabled, indices below -1 are invalid
    msg = ('When allow_fill=True and fill_value is not None, '
           'all indices must be >= -1')
    with tm.assertRaisesRegexp(ValueError, msg):
        sparse.take(np.array([1, 0, -2]), fill_value=True)
    with tm.assertRaisesRegexp(ValueError, msg):
        sparse.take(np.array([1, 0, -5]), fill_value=True)
    # out-of-bounds indices raise regardless of filling mode
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, -6]))
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, 5]))
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
    """Taking from an all-NaN array yields NaN with or without filling,
    and out-of-bounds indices still raise."""
    sparse = SparseArray([np.nan] * 5)
    all_nan = SparseArray([np.nan, np.nan, np.nan])
    tm.assert_sp_array_equal(sparse.take(np.array([1, 0, -1])), all_nan)
    tm.assert_sp_array_equal(
        sparse.take(np.array([1, 0, -1]), fill_value=True), all_nan)
    for bad_indices in (np.array([1, -6]), np.array([1, 5])):
        with tm.assertRaises(IndexError):
            sparse.take(bad_indices)
    with tm.assertRaises(IndexError):
        sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
    """SparseArray rejects in-place mutation via item or slice assignment."""
    assign_item = lambda: operator.setitem(self.arr, 5, 3)
    assign_slice = lambda: operator.setitem(self.arr, slice(1, 5), 2)
    assertRaisesRegexp(TypeError, "item assignment", assign_item)
    assertRaisesRegexp(TypeError, "item assignment", assign_slice)
def test_constructor_from_too_large_array(self):
    """A 2-D ndarray is rejected: SparseArray is strictly one-dimensional."""
    two_dim = np.arange(10).reshape((2, 5))
    assertRaisesRegexp(TypeError, "expected dimension <= 1 data",
                       SparseArray, two_dim)
def test_constructor_from_sparse(self):
    """Constructing from another SparseArray keeps fill_value and values."""
    rebuilt = SparseArray(self.zarr)
    self.assertEqual(rebuilt.fill_value, 0)
    assert_almost_equal(rebuilt.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
    """copy=True detaches the value buffer; the default shares it."""
    cp = SparseArray(self.arr, copy=True)
    cp.sp_values[:3] = 0
    # mutating the copy must not leak back into the source
    self.assertFalse((self.arr.sp_values[:3] == 0).any())
    not_copy = SparseArray(self.arr)
    not_copy.sp_values[:3] = 0
    # without copy=True the underlying sp_values are shared
    self.assertTrue((self.arr.sp_values[:3] == 0).all())
def test_constructor_bool(self):
    """bool dtype with fill_value=False stores only the True positions."""
    # GH 10648
    data = np.array([False, False, True, True, False, False])
    arr = SparseArray(data, fill_value=False, dtype=bool)
    self.assertEqual(arr.dtype, bool)
    tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
    tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
    # indices of the stored (True) entries
    tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3]))
    for dense in [arr.to_dense(), arr.values]:
        self.assertEqual(dense.dtype, bool)
        tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
    """fill_value defaults to False for bool data and honors an explicit
    True."""
    for dtype in (None, np.bool):
        barr = SparseArray([True, False, True], dtype=dtype)
        self.assertEqual(barr.dtype, np.bool)
        self.assertFalse(barr.fill_value)
    barr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
    self.assertEqual(barr.dtype, np.bool)
    self.assertTrue(barr.fill_value)
def test_constructor_float32(self):
    """An explicit float32 dtype is preserved through sp_values/to_dense.

    Consistency fix: the final comparison now calls
    tm.assert_numpy_array_equal like every other assertion in this class
    (and like the parallel test_constructor_bool) instead of the
    TestCase-bound self.assert_numpy_array_equal alias.
    """
    # GH 10648
    data = np.array([1., np.nan, 3], dtype=np.float32)
    arr = SparseArray(data, dtype=np.float32)
    self.assertEqual(arr.dtype, np.float32)
    # NaN is the default fill, so only the two real values are stored
    tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3]))
    tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
    tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([0, 2]))
    for dense in [arr.to_dense(), arr.values]:
        self.assertEqual(dense.dtype, np.float32)
        tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
    """astype('f8') returns an independent copy; integer casts on data
    containing NaN are refused."""
    converted = self.arr.astype('f8')
    converted.sp_values[:3] = 27
    # the conversion must not share a buffer with the source
    self.assertFalse((self.arr.sp_values[:3] == 27).any())
    assertRaisesRegexp(TypeError, "floating point", self.arr.astype, 'i8')
def test_copy_shallow(self):
    """copy(deep=False) shares the underlying buffer with the source."""
    arr2 = self.arr.copy(deep=False)

    def _get_base(values):
        # walk .base links to the ultimate owning ndarray
        base = values.base
        while base.base is not None:
            base = base.base
        return base

    # both views must bottom out on the very same buffer object
    assert (_get_base(arr2) is _get_base(self.arr))
def test_values_asarray(self):
    """values, to_dense() and np.asarray all round-trip the dense data."""
    for dense_view in (self.arr.values, self.arr.to_dense()):
        assert_almost_equal(dense_view, self.arr_data)
    assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
def test_to_dense(self):
    """to_dense() reproduces the original values for NaN-heavy, mixed and
    all-NaN inputs, with either the default or a zero fill_value."""
    cases = [
        np.array([1, np.nan, np.nan, 3, np.nan]),
        np.array([1, np.nan, 0, 3, 0]),
        np.array([np.nan] * 5),
    ]
    for vals in cases:
        tm.assert_numpy_array_equal(SparseArray(vals).to_dense(), vals)
        tm.assert_numpy_array_equal(
            SparseArray(vals, fill_value=0).to_dense(), vals)
def test_getitem(self):
    """Positive and negative scalar indices agree with the dense values."""
    for i in range(len(self.arr)):
        assert_almost_equal(self.arr[i], self.arr.values[i])
        assert_almost_equal(self.arr[-i], self.arr.values[-i])
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.values[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.values[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.values[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with tm.assertRaises(IndexError):
sparse[4:, :]
with tm.assertRaises(IndexError):
# check numpy compat
dense[4:, :]
    def test_binary_operators(self):
        # Exercise elementwise arithmetic between two sparse arrays (NaN
        # fill and explicit fill_value=3), between sparse and dense, and
        # between sparse and a scalar.
        data1 = np.random.randn(20)
        data2 = np.random.randn(20)
        # Punch NaN holes at different strides so the sparse indices differ.
        data1[::2] = np.nan
        data2[::3] = np.nan
        arr1 = SparseArray(data1)
        arr2 = SparseArray(data2)
        # Same data, but with the holes replaced by an explicit fill value.
        data1[::2] = 3
        data2[::3] = 3
        farr1 = SparseArray(data1, fill_value=3)
        farr2 = SparseArray(data2, fill_value=3)
        def _check_op(op, first, second):
            # sparse OP sparse: result matches the dense computation and
            # keeps the left operand's fill_value.
            res = op(first, second)
            exp = SparseArray(op(first.values, second.values),
                              fill_value=first.fill_value)
            tm.assertIsInstance(res, SparseArray)
            assert_almost_equal(res.values, exp.values)
            # sparse OP dense ndarray.
            res2 = op(first, second.values)
            tm.assertIsInstance(res2, SparseArray)
            tm.assert_sp_array_equal(res, res2)
            # dense ndarray OP sparse.
            res3 = op(first.values, second)
            tm.assertIsInstance(res3, SparseArray)
            tm.assert_sp_array_equal(res, res3)
            # sparse OP scalar.
            res4 = op(first, 4)
            tm.assertIsInstance(res4, SparseArray)
            # ignore this if the actual op raises (e.g. pow)
            try:
                exp = op(first.values, 4)
                exp_fv = op(first.fill_value, 4)
                assert_almost_equal(res4.fill_value, exp_fv)
                assert_almost_equal(res4.values, exp)
            except ValueError:
                pass
        def _check_inplace_op(op):
            # In-place operators are not supported on SparseArray.
            tmp = arr1.copy()
            self.assertRaises(NotImplementedError, op, tmp, arr2)
        bin_ops = [operator.add, operator.sub, operator.mul, operator.truediv,
                   operator.floordiv, operator.pow]
        for op in bin_ops:
            _check_op(op, arr1, arr2)
            _check_op(op, farr1, farr2)
        inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv', 'ipow']
        for op in inplace_ops:
            _check_inplace_op(getattr(operator, op))
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = self.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([0, 0, 0, 0])
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([0, 0, 0, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3])
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0)
tm.assert_sp_array_equal(res, exp)
class TestSparseArrayArithmetic(tm.TestCase):
    """Elementwise arithmetic and comparison operators on SparseArray,
    verified against the same operation applied to the dense equivalents."""

    _multiprocess_can_split_ = True

    def _check_numeric_ops(self, a, b, a_dense, b_dense):
        # Every binary arithmetic op, in both operand orders, must densify
        # to the dense result.
        tm.assert_numpy_array_equal((a + b).to_dense(), a_dense + b_dense)
        tm.assert_numpy_array_equal((b + a).to_dense(), b_dense + a_dense)
        tm.assert_numpy_array_equal((a - b).to_dense(), a_dense - b_dense)
        tm.assert_numpy_array_equal((b - a).to_dense(), b_dense - a_dense)
        tm.assert_numpy_array_equal((a * b).to_dense(), a_dense * b_dense)
        tm.assert_numpy_array_equal((b * a).to_dense(), b_dense * a_dense)
        tm.assert_numpy_array_equal((a / b).to_dense(), a_dense / b_dense)
        tm.assert_numpy_array_equal((b / a).to_dense(), b_dense / a_dense)
        tm.assert_numpy_array_equal((a // b).to_dense(), a_dense // b_dense)
        tm.assert_numpy_array_equal((b // a).to_dense(), b_dense // a_dense)
        tm.assert_numpy_array_equal((a % b).to_dense(), a_dense % b_dense)
        tm.assert_numpy_array_equal((b % a).to_dense(), b_dense % a_dense)
        tm.assert_numpy_array_equal((a ** b).to_dense(), a_dense ** b_dense)
        tm.assert_numpy_array_equal((b ** a).to_dense(), b_dense ** a_dense)

    def _check_comparison_ops(self, a, b, a_dense, b_dense):
        def _check(res):
            # Comparisons must yield a boolean SparseArray whose
            # fill_value is a plain Python bool.
            tm.assertIsInstance(res, SparseArray)
            self.assertEqual(res.dtype, np.bool)
            self.assertIsInstance(res.fill_value, bool)
        _check(a == b)
        tm.assert_numpy_array_equal((a == b).to_dense(), a_dense == b_dense)
        _check(a != b)
        tm.assert_numpy_array_equal((a != b).to_dense(), a_dense != b_dense)
        _check(a >= b)
        tm.assert_numpy_array_equal((a >= b).to_dense(), a_dense >= b_dense)
        _check(a <= b)
        tm.assert_numpy_array_equal((a <= b).to_dense(), a_dense <= b_dense)
        _check(a > b)
        tm.assert_numpy_array_equal((a > b).to_dense(), a_dense > b_dense)
        _check(a < b)
        tm.assert_numpy_array_equal((a < b).to_dense(), a_dense < b_dense)

    def test_float_scalar(self):
        # sparse OP scalar, for both index kinds and several fill values.
        values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        for kind in ['integer', 'block']:
            a = SparseArray(values, kind=kind)
            self._check_numeric_ops(a, 1, values, 1)
            self._check_numeric_ops(a, 0, values, 0)
            self._check_numeric_ops(a, 3, values, 3)
            a = SparseArray(values, kind=kind, fill_value=0)
            self._check_numeric_ops(a, 1, values, 1)
            self._check_numeric_ops(a, 0, values, 0)
            self._check_numeric_ops(a, 3, values, 3)
            a = SparseArray(values, kind=kind, fill_value=2)
            self._check_numeric_ops(a, 1, values, 1)
            self._check_numeric_ops(a, 0, values, 0)
            self._check_numeric_ops(a, 3, values, 3)

    def test_float_scalar_comparison(self):
        # sparse CMP scalar, mirroring test_float_scalar.
        values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        for kind in ['integer', 'block']:
            a = SparseArray(values, kind=kind)
            self._check_comparison_ops(a, 1, values, 1)
            self._check_comparison_ops(a, 0, values, 0)
            self._check_comparison_ops(a, 3, values, 3)
            a = SparseArray(values, kind=kind, fill_value=0)
            self._check_comparison_ops(a, 1, values, 1)
            self._check_comparison_ops(a, 0, values, 0)
            self._check_comparison_ops(a, 3, values, 3)
            a = SparseArray(values, kind=kind, fill_value=2)
            self._check_comparison_ops(a, 1, values, 1)
            self._check_comparison_ops(a, 0, values, 0)
            self._check_comparison_ops(a, 3, values, 3)

    def test_float_same_index(self):
        # when sp_index are the same
        for kind in ['integer', 'block']:
            values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
            rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
            a = SparseArray(values, kind=kind)
            b = SparseArray(rvalues, kind=kind)
            self._check_numeric_ops(a, b, values, rvalues)
            values = np.array([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
            rvalues = np.array([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind, fill_value=0)
            self._check_numeric_ops(a, b, values, rvalues)

    def test_float_same_index_comparison(self):
        # when sp_index are the same
        for kind in ['integer', 'block']:
            values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
            rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
            a = SparseArray(values, kind=kind)
            b = SparseArray(rvalues, kind=kind)
            self._check_comparison_ops(a, b, values, rvalues)
            values = np.array([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
            rvalues = np.array([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind, fill_value=0)
            self._check_comparison_ops(a, b, values, rvalues)

    def test_float_array(self):
        # Operands whose sparse indices differ, including mixed and
        # mismatched fill values.
        values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
        for kind in ['integer', 'block']:
            a = SparseArray(values, kind=kind)
            b = SparseArray(rvalues, kind=kind)
            self._check_numeric_ops(a, b, values, rvalues)
            self._check_numeric_ops(a, b * 0, values, rvalues * 0)
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind)
            self._check_numeric_ops(a, b, values, rvalues)
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind, fill_value=0)
            self._check_numeric_ops(a, b, values, rvalues)
            a = SparseArray(values, kind=kind, fill_value=1)
            b = SparseArray(rvalues, kind=kind, fill_value=2)
            self._check_numeric_ops(a, b, values, rvalues)

    def test_float_array_different_kind(self):
        # Mixing 'integer'-kind and 'block'-kind operands must still work.
        values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
        a = SparseArray(values, kind='integer')
        b = SparseArray(rvalues, kind='block')
        self._check_numeric_ops(a, b, values, rvalues)
        self._check_numeric_ops(a, b * 0, values, rvalues * 0)
        a = SparseArray(values, kind='integer', fill_value=0)
        b = SparseArray(rvalues, kind='block')
        self._check_numeric_ops(a, b, values, rvalues)
        a = SparseArray(values, kind='integer', fill_value=0)
        b = SparseArray(rvalues, kind='block', fill_value=0)
        self._check_numeric_ops(a, b, values, rvalues)
        a = SparseArray(values, kind='integer', fill_value=1)
        b = SparseArray(rvalues, kind='block', fill_value=2)
        self._check_numeric_ops(a, b, values, rvalues)

    def test_float_array_comparison(self):
        # Comparison counterpart of test_float_array.
        values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
        for kind in ['integer', 'block']:
            a = SparseArray(values, kind=kind)
            b = SparseArray(rvalues, kind=kind)
            self._check_comparison_ops(a, b, values, rvalues)
            self._check_comparison_ops(a, b * 0, values, rvalues * 0)
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind)
            self._check_comparison_ops(a, b, values, rvalues)
            a = SparseArray(values, kind=kind, fill_value=0)
            b = SparseArray(rvalues, kind=kind, fill_value=0)
            self._check_comparison_ops(a, b, values, rvalues)
            a = SparseArray(values, kind=kind, fill_value=1)
            b = SparseArray(rvalues, kind=kind, fill_value=2)
            self._check_comparison_ops(a, b, values, rvalues)
class TestSparseArrayAnalytics(tm.TestCase):
    """Reduction methods (sum, cumsum, mean) and their numpy dispatch."""

    def test_sum(self):
        values = np.arange(10).astype(float)
        self.assertEqual(SparseArray(values).sum(), 45.0)
        # NaN entries are skipped, whatever the fill value is.
        values[5] = np.nan
        self.assertEqual(SparseArray(values, fill_value=2).sum(), 40.0)
        self.assertEqual(SparseArray(values, fill_value=np.nan).sum(), 40.0)

    def test_numpy_sum(self):
        values = np.arange(10).astype(float)
        self.assertEqual(np.sum(SparseArray(values)), 45.0)
        values[5] = np.nan
        self.assertEqual(np.sum(SparseArray(values, fill_value=2)), 40.0)
        out = np.sum(SparseArray(values, fill_value=np.nan))
        self.assertEqual(out, 40.0)
        # numpy compat: unsupported ufunc kwargs must raise.
        msg = "the 'dtype' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.sum,
                              SparseArray(values), dtype=np.int64)
        msg = "the 'out' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.sum,
                              SparseArray(values), out=out)

    def test_cumsum(self):
        values = np.arange(10).astype(float)
        result = SparseArray(values).cumsum()
        tm.assert_sp_array_equal(result, SparseArray(values.cumsum()))
        # TODO: gh-12855 - return a SparseArray here
        values[5] = np.nan
        result = SparseArray(values, fill_value=2).cumsum()
        self.assertNotIsInstance(result, SparseArray)
        tm.assert_numpy_array_equal(result, values.cumsum())
        result = SparseArray(values, fill_value=np.nan).cumsum()
        expected = SparseArray(np.array([
            0, 1, 3, 6, 10, np.nan, 16, 23, 31, 40]))
        tm.assert_sp_array_equal(result, expected)

    def test_numpy_cumsum(self):
        values = np.arange(10).astype(float)
        result = np.cumsum(SparseArray(values))
        tm.assert_sp_array_equal(result, SparseArray(values.cumsum()))
        # TODO: gh-12855 - return a SparseArray here
        values[5] = np.nan
        result = np.cumsum(SparseArray(values, fill_value=2))
        self.assertNotIsInstance(result, SparseArray)
        tm.assert_numpy_array_equal(result, values.cumsum())
        result = np.cumsum(SparseArray(values, fill_value=np.nan))
        expected = SparseArray(np.array([
            0, 1, 3, 6, 10, np.nan, 16, 23, 31, 40]))
        tm.assert_sp_array_equal(result, expected)
        msg = "the 'dtype' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
                              SparseArray(values), dtype=np.int64)
        msg = "the 'out' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
                              SparseArray(values), out=result)

    def test_mean(self):
        values = np.arange(10).astype(float)
        self.assertEqual(SparseArray(values).mean(), 4.5)
        # NaNs are excluded from both the sum and the count.
        values[5] = np.nan
        self.assertEqual(SparseArray(values).mean(), 40.0 / 9)

    def test_numpy_mean(self):
        values = np.arange(10).astype(float)
        out = np.mean(SparseArray(values))
        self.assertEqual(out, 4.5)
        values[5] = np.nan
        out = np.mean(SparseArray(values))
        self.assertEqual(out, 40.0 / 9)
        msg = "the 'dtype' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.mean,
                              SparseArray(values), dtype=np.int64)
        msg = "the 'out' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.mean,
                              SparseArray(values), out=out)
if __name__ == '__main__':
    import nose
    # Run this module's tests under nose: verbose, stop on first failure,
    # drop into pdb on errors/failures, and do not sys.exit afterwards.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| |
#!/usr/bin/env python
#
# TODO:
# - Task colors:
# - User-defined using config file.
# - Automagically chosen from color space.
# - Advanced algorithm (contact Hannes Pretorius).
# - Koos' specs:
# - Resources and tasks sorted in read-in order (default)
# or alphabetically (flag).
# - Have proper gnuplot behavior on windows/x11, eps/pdf, latex terminals.
# - Create and implement algorithm for critical path analysis.
# - Split generic stuff into a Gantt class, and specific stuff into the main.
#
# gantt.py ganttfile | gnuplot
import itertools, sys, getopt
from ConfigParser import ConfigParser
rectangleHeight = 0.8 #: Height of a rectangle in units.
class Activity(object):
    """
    One scheduled activity: a task performed on a resource during a
    time interval.

    @ivar resource: Resource name.
    @type resource: C{str}

    @ivar start: Start time of the activity.
    @type start: C{float}

    @ivar stop: End time of the activity.
    @type stop: C{float}

    @ivar task: Name of the task/activity being performed.
    @type task: C{str}
    """
    def __init__(self, resource, start, stop, task):
        self.resource, self.task = resource, task
        self.start, self.stop = start, stop
class Rectangle(object):
    """
    Drawable rectangle for a single activity.

    @ivar bottomleft: (x, y) position of the bottom-left corner.
    @ivar topright: (x, y) position of the top-right corner.
    @ivar fillcolor: Gnuplot fill color of the rectangle.
    """
    def __init__(self, bottomleft, topright, fillcolor):
        self.bottomleft = bottomleft
        self.topright = topright
        self.fillcolor = fillcolor
        # Fixed drawing style shared by every rectangle.
        self.fillstyle = 'solid 0.8'
        self.linewidth = 2
class ColorBook(object):
    """
    Class managing task colors.

    @ivar colors: Mapping of task name to color value.
    @ivar palette: Gnuplot palette definition.
    @ivar prefix: Gnuplot color prefix ('rgb' or 'palette frac').
    """
    def __init__(self, colorfname, tasks):
        """
        Construct a ColorBook object.

        @param colorfname: Name of the color config file (if specified).
        @type  colorfname: C{str} or C{None}

        @param tasks: Existing task types.
        @type  tasks: C{list} of C{str}
        """
        if colorfname:
            values = self.load_config(colorfname, tasks)
        else:
            values = self.fixed(tasks)

        self.colors, self.palette, self.prefix = values

    def load_config(self, colorfname, tasks):
        """
        Read task colors from a configuration file.

        @return: (task color mapping, palette definition, color prefix).
        @raise KeyError: If any task has no color assigned.
        """
        palettedef = 'model RGB'
        colorprefix = 'rgb'
        # Read in task colors from configuration file
        config = ConfigParser()
        config.optionxform = str # makes option names case sensitive
        # Close the config file instead of leaking the handle.
        with open(colorfname, 'r') as handle:
            config.readfp(handle)
        # Colors are RGB colornames
        colors = dict(config.items('Colors'))
        # Raise KeyError if no color is specified for a task
        # ('in' instead of the deprecated dict.has_key()).
        nocolors = [t for t in tasks if t not in colors]
        if nocolors:
            msg = 'Could not find task color for ' + ', '.join(nocolors)
            raise KeyError(msg)
        return colors, palettedef, colorprefix

    def fixed(self, tasks):
        """
        Pick colors from a pre-defined palette.

        @return: (task color mapping, palette definition, color prefix).
        """
        # Set task colors
        # SE colors
        # (see http://w3.wtb.tue.nl/nl/organisatie/systems_engineering/\
        # info_for_se_students/how2make_a_poster/pictures/)
        # Decrease the 0.8 values for less transparent colors.
        se_palette = {"se_red": (1.0, 0.8, 0.8),
                      "se_pink": (1.0, 0.8, 1.0),
                      "se_violet": (0.8, 0.8, 1.0),
                      "se_blue": (0.8, 1.0, 1.0),
                      "se_green": (0.8, 1.0, 0.8),
                      "se_yellow": (1.0, 1.0, 0.8)}
        se_gradient = ["se_red", "se_pink", "se_violet",
                       "se_blue", "se_green", "se_yellow"]
        se_palettedef = '( ' + \
                        ', '.join(('%d ' % n +
                                   ' '.join((str(x) for x in se_palette[c]))
                                   for n, c in enumerate(se_gradient))) + \
                        ' )'

        palettedef = 'model RGB defined %s' % se_palettedef
        colorprefix = 'palette frac'
        # Colors are fractions from the palette defined above.  Guard the
        # denominator so a single task does not trigger ZeroDivisionError.
        denom = max(len(tasks) - 1, 1)
        colors = dict((t, '%0.2f' % (float(n) / denom))
                      for n, t in enumerate(tasks))
        return colors, palettedef, colorprefix
class DummyClass(object):
    """
    Dummy class for storing option values in; attributes are assigned
    externally (see make_default_options / process_options).
    """
def make_rectangles(activities, resource_map, colors):
    """
    Construct a collection of L{Rectangle} for all activities.

    @param activities: Activities being performed.
    @type  activities: C{iterable} of L{Activity}

    @param resource_map: Indices of all resources.
    @type  resource_map: C{dict} of C{str} to C{int}

    @param colors: Colors for all tasks.
    @type  colors: C{dict} of C{str} to C{str}

    @return: Collection of rectangles to draw.
    @rtype:  C{list} of L{Rectangle}
    """
    half_height = 0.5 * rectangleHeight
    rects = []
    for act in activities:
        # Each resource occupies one horizontal band; the rectangle is
        # centered vertically inside it.
        row = resource_map[act.resource]
        rects.append(Rectangle((act.start, row - half_height),
                               (act.stop, row + half_height),
                               colors[act.task]))
    return rects
def load_ganttfile(ganttfile):
    """
    Load the resource/task file.  Each non-empty line has the form
    'resource start stop task'; extra fields are ignored.

    @param ganttfile: Name of the gantt file.
    @type  ganttfile: C{str}

    @return: Activities loaded from the file, collection of
             (resource, start, end, task) activities.
    @rtype:  C{list} of L{Activity}
    """
    activities = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(ganttfile, 'r') as handle:
        for line in handle:
            fields = line.split()
            if not fields:
                continue
            resource = fields[0]
            start = float(fields[1])
            stop = float(fields[2])
            task = fields[3]
            activities.append(Activity(resource, start, stop, task))
    return activities
def make_unique_tasks_resources(alphasort, activities):
    """
    Construct collections of unique task names and resource names.

    @param alphasort: Sort resources and tasks alphabetically.
    @type  alphasort: C{bool}

    @param activities: Activities to draw.
    @type  activities: C{list} of L{Activity}

    @return: Collections of task-types and resources.
    @rtype:  C{list} of C{str}, C{list} of C{str}
    """
    # Collect unique resources and tasks, preserving read-in order.
    seen_resources = []
    seen_tasks = []
    for act in activities:
        if act.resource not in seen_resources:
            seen_resources.append(act.resource)
        if act.task not in seen_tasks:
            seen_tasks.append(act.task)

    if alphasort:
        # Switch to alphabetical order instead of read-in order.
        seen_resources.sort()
        seen_tasks.sort()

    # Resources are drawn from top (y=max) to bottom (y=1).
    seen_resources.reverse()
    return seen_tasks, seen_resources
def generate_plotdata(activities, resources, tasks, rectangles, options,
                      resource_map, color_book):
    """
    Generate Gnuplot lines.

    @param activities: Activities to draw (used for the time range).
    @param resources: Unique resource names, bottom-to-top order.
    @param tasks: Unique task names (one legend entry each).
    @param rectangles: Rectangles to draw, one per activity.
    @param options: Parsed command-line options (plottitle is used).
    @param resource_map: Mapping of resource name to y-axis index.
    @param color_book: Task color definitions (palette, prefix, colors).

    @return: (axis/palette 'set' lines, 'set object rectangle' lines,
              'plot' command lines).
    """
    # Plot ranges: x covers all activities, y covers all resource bands.
    xmin = 0
    xmax = max(act.stop for act in activities)
    ymin = 0 + (rectangleHeight / 2)
    ymax = len(resources) + 1 - (rectangleHeight / 2)
    xlabel = 'time'
    ylabel = ''
    title = options.plottitle
    # Label each y tic with its resource name.
    ytics = ''.join(['(',
                     ', '.join(('"%s" %d' % item)
                               for item in resource_map.iteritems()),
                     ')'])
    # outside and 2 characters from the graph
    key_position = 'outside width +2'
    grid_tics = 'xtics'
    # Set plot dimensions
    plot_dimensions = ['set xrange [%f:%f]' % (xmin, xmax),
                       'set yrange [%f:%f]' % (ymin, ymax),
                       'set autoscale x', # extends x axis to next tic mark
                       'set xlabel "%s"' % xlabel,
                       'set ylabel "%s"' % ylabel,
                       'set title "%s"' % title,
                       'set ytics %s' % ytics,
                       'set key %s' % key_position,
                       'set grid %s' % grid_tics,
                       'set palette %s' % color_book.palette,
                       'unset colorbox']
    # Generate gnuplot rectangle objects (object indices start at 1).
    plot_rectangles = (' '.join(['set object %d rectangle' % n,
                                 'from %f, %0.1f' % r.bottomleft,
                                 'to %f, %0.1f' % r.topright,
                                 'fillcolor %s %s' % (color_book.prefix,
                                                      r.fillcolor),
                                 'fillstyle solid 0.8'])
                       for n, r in itertools.izip(itertools.count(1), rectangles))
    # Generate gnuplot lines; dummy '-1' plots exist only to create
    # one legend entry per task, colored like the task's rectangles.
    plot_lines = ['plot ' +
                  ', \\\n\t'.join(' '.join(['-1',
                                            'title "%s"' % t,
                                            'with lines',
                                            'linecolor %s %s ' % (color_book.prefix,
                                                                  color_book.colors[t]),
                                            'linewidth 6'])
                                  for t in tasks)]
    return plot_dimensions, plot_rectangles, plot_lines
def write_data(plot_dimensions, plot_rectangles, plot_lines, fname):
    """
    Write plot data out to file or screen.

    @param plot_dimensions: Gnuplot 'set' commands (axes, palette, ...).
    @type  plot_dimensions: C{list} of C{str}

    @param plot_rectangles: Gnuplot 'set object rectangle' commands.
    @type  plot_rectangles: C{iterable} of C{str}

    @param plot_lines: Gnuplot 'plot' command lines.
    @type  plot_lines: C{list} of C{str}

    @param fname: Name of the output file, if specified.
    @type  fname: C{str}
    """
    if fname:
        g = open(fname, 'w')
        g.write('\n'.join(itertools.chain(plot_dimensions, plot_rectangles,
                                          plot_lines)))
        g.close()
    else:
        # No output file given: write to stdout so the output can be
        # piped straight into gnuplot.
        print '\n'.join(itertools.chain(plot_dimensions, plot_rectangles,
                                        plot_lines))
def fmt_opt(short, long, arg, text):
    """
    Format one option definition for the usage text.

    @param short: getopt short spec, e.g. 'o:' (colon means it takes a value).
    @param long: getopt long spec, e.g. 'output='.
    @param arg: Placeholder name of the option value, or '' for flags.
    @param text: Help text for the option.
    """
    if not arg:
        return '-%s, --%s\t%s' % (short, long, text)
    # Drop the trailing ':' of the short spec when showing the argument.
    return '-%s %s, --%s%s\t%s' % (short[:-1], arg, long, arg, text)
def make_default_options():
    """
    Create an option container holding the default option values.

    @return: Default values for all command-line options.
    @rtype:  L{DummyClass}
    """
    defaults = DummyClass()
    defaults.outputfile = ''
    defaults.colorfile = ''
    defaults.alphasort = False
    defaults.plottitle = ''
    return defaults
def process_options():
    """
    Handle option and command-line argument processing.

    @return: Options and gantt input filename.
    @rtype:  L{DummyClass} options, C{str}
    """
    optdefs = [('o:', 'output=', 'FILE', 'Write output to FILE.'),
               ('c:', 'color=', 'FILE', 'Use task colors (RGB) as defined in '
                'configuration FILE (in RGB triplets,\n\t\t\t\tGnuplot '
                'colornames, or hexadecimal representations.'),
               ('a', 'alpha', '', '\t\tShow resources and tasks in '
                'alphabetical order.'),
               ('t:','title=', 'TITLE', 'Set plot title to TITLE (between '
                'double quotes).'),
               ('h', 'help', '', '\t\tShow online help.')]
    short_opts = ''.join(opt[0] for opt in optdefs if opt[0])
    long_opts = [opt[1] for opt in optdefs if opt[1]]
    usage_text = 'gantt.py [options] gantt-file\nwhere\n' + \
                 '\n'.join(' ' + fmt_opt(*opt) for opt in optdefs)
    option_values = make_default_options()
    try:
        opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
    except getopt.GetoptError as err:
        # 'as' form works on Python 2.6+ and Python 3.
        sys.stderr.write("gantt.py: %s\n" % err)
        sys.exit(2)
    for opt, optval in opts:
        if opt in ('-o', '--output'):
            option_values.outputfile = optval
            continue
        if opt in ('-c', '--color'):
            option_values.colorfile = optval
            continue
        if opt in ('-a', '--alpha'):
            # BUG FIX: the long option is registered as '--alpha' above,
            # but this branch previously matched '--alphasort' only, so
            # the long form was parsed and then silently ignored.
            option_values.alphasort = True
            continue
        if opt in ('-t', '--title'):
            option_values.plottitle = optval
            continue
        if opt in ('-h', '--help'):
            # stdout.write instead of the Python-2-only print statement.
            sys.stdout.write(usage_text + '\n')
            sys.exit(0)
    # Check if correct number of arguments is supplied
    if len(args) != 1:
        # (also fixes the 'gantty.py' typo in the program name)
        sys.stderr.write('gantt.py: incorrect number of arguments '
                         '(task/resource file expected)\n')
        sys.exit(1)
    return option_values, args[0]
def compute(options, ganttfile):
    """
    Load the gantt file, derive the plot data, and write it out.

    @param options: Parsed command-line options (see L{make_default_options}).
    @param ganttfile: Name of the gantt input file.
    @type  ganttfile: C{str}
    """
    activities = load_ganttfile(ganttfile)
    tasks, resources = make_unique_tasks_resources(options.alphasort,
                                                   activities)
    # Assign indices to resources
    resource_map = dict(itertools.izip(resources, itertools.count(1)))
    color_book = ColorBook(options.colorfile, tasks)
    rectangles = make_rectangles(activities, resource_map, color_book.colors)
    plot_dims, plot_rects, plot_lines = \
        generate_plotdata(activities, resources, tasks, rectangles,
                          options, resource_map, color_book)
    write_data(plot_dims, plot_rects, plot_lines, options.outputfile)
def run():
    """Program entry point: parse the command line, then compute and
    emit the gnuplot data."""
    options, ganttfile = process_options()
    compute(options, ganttfile)


if __name__ == '__main__':
    run()
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define abstractions for various MySQL flavors."""
import logging
import os
import subprocess
import sys

import environment
class MysqlFlavor(object):
  """Base class with default SQL statements.

  Subclasses override the flavor-specific pieces: GTID handling,
  position comparison, and the extra my.cnf file.
  """

  def demote_master_commands(self):
    """Returns commands to stop the current master."""
    return ["SET GLOBAL read_only = ON",
            "FLUSH TABLES WITH READ LOCK",
            "UNLOCK TABLES"]

  def promote_slave_commands(self):
    """Returns commands to convert a slave to a master."""
    return ["STOP SLAVE",
            "RESET SLAVE ALL",
            "SET GLOBAL read_only = OFF"]

  def reset_replication_commands(self):
    """Returns commands to reset replication settings."""
    return ["STOP SLAVE",
            "RESET SLAVE ALL",
            "RESET MASTER"]

  def change_master_commands(self, host, port, pos):
    """Returns commands to reparent to a new master; flavor-specific."""
    raise NotImplementedError()

  def set_semi_sync_enabled_commands(self, master=None, slave=None):
    """Returns commands to turn semi-sync on/off (None leaves a side alone)."""
    cmds = []
    if master is not None:
      cmds.append("SET GLOBAL rpl_semi_sync_master_enabled = %d" % master)
    if slave is not None:
      cmds.append("SET GLOBAL rpl_semi_sync_slave_enabled = %d" % slave)
    return cmds

  def extra_my_cnf(self):
    """Returns the path to an extra my_cnf file, or None."""
    return None

  def master_position(self, tablet):
    """Returns the position from SHOW MASTER STATUS as a string."""
    raise NotImplementedError()

  def position_equal(self, a, b):
    """Returns true if position 'a' is equal to 'b'."""
    raise NotImplementedError()

  def position_at_least(self, a, b):
    """Returns true if position 'a' is at least as far along as 'b'."""
    raise NotImplementedError()

  def position_after(self, a, b):
    """Returns true if position 'a' is strictly after 'b'."""
    return self.position_at_least(a, b) and not self.position_equal(a, b)

  def enable_binlog_checksum(self, tablet):
    """Enables binlog_checksum and returns True if the flavor supports it.

    Args:
      tablet: A tablet.Tablet object.

    Returns:
      False if the flavor doesn't support binlog_checksum.
    """
    tablet.mquery("", "SET @@global.binlog_checksum=1")
    return True

  def disable_binlog_checksum(self, tablet):
    """Disables binlog_checksum if the flavor supports it."""
    tablet.mquery("", "SET @@global.binlog_checksum=0")
class MariaDB(MysqlFlavor):
  """Overrides specific to MariaDB."""

  def reset_replication_commands(self):
    # MariaDB additionally clears its GTID slave position.
    return ["STOP SLAVE",
            "RESET SLAVE ALL",
            "RESET MASTER",
            "SET GLOBAL gtid_slave_pos = ''"]

  def extra_my_cnf(self):
    return environment.vttop + "/config/mycnf/master_mariadb.cnf"

  def master_position(self, tablet):
    # Prefix the binlog GTID with the flavor name.
    return "MariaDB/" + tablet.mquery(
        "", "SELECT @@GLOBAL.gtid_binlog_pos")[0][0]

  def position_equal(self, a, b):
    return a == b

  def position_at_least(self, a, b):
    # positions are MariaDB/A-B-C and we only compare C
    seq_a = int(a.split("-")[2])
    seq_b = int(b.split("-")[2])
    return seq_a >= seq_b

  def change_master_commands(self, host, port, pos):
    # pos looks like "MariaDB/<domain>-<server>-<sequence>".
    gtid = pos.split("/")[1]
    return [
        "SET GLOBAL gtid_slave_pos = '%s'" % gtid,
        "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, "
        "MASTER_USER='vt_repl', MASTER_USE_GTID = slave_pos" %
        (host, port)]
class MariaDB103(MariaDB):
  """Overrides specific to MariaDB 10.3+."""

  def extra_my_cnf(self):
    # Same behavior as MariaDB, but with a 10.3-specific config file.
    return environment.vttop + "/config/mycnf/master_mariadb103.cnf"
class MySQL56(MysqlFlavor):
  """Overrides specific to MySQL 5.6/5.7"""

  def master_position(self, tablet):
    # Position is the executed GTID set, prefixed with the flavor name.
    gtid = tablet.mquery("", "SELECT @@GLOBAL.gtid_executed")[0][0]
    return "MySQL56/" + gtid

  def position_equal(self, a, b):
    # GTID-set comparison is delegated to the mysqlctl binary.
    # NOTE(review): check_output returns bytes on Python 3, which would
    # never equal the str "true"; this presumably runs on Python 2 only.
    return subprocess.check_output([
        "mysqlctl", "position", "equal", a, b,
    ]).strip() == "true"

  def position_at_least(self, a, b):
    return subprocess.check_output([
        "mysqlctl", "position", "at_least", a, b,
    ]).strip() == "true"

  def extra_my_cnf(self):
    return environment.vttop + "/config/mycnf/master_mysql56.cnf"

  def change_master_commands(self, host, port, pos):
    # pos looks like "MySQL56/<gtid set>"; strip the flavor prefix.
    gtid = pos.split("/")[1]
    return [
        "RESET MASTER",
        "SET GLOBAL gtid_purged = '%s'" % gtid,
        "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, "
        "MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1" %
        (host, port)]
class MySQL80(MySQL56):
  """Overrides specific to MySQL 8.0."""

  def extra_my_cnf(self):
    # Same behavior as MySQL 5.6/5.7, but with an 8.0-specific config.
    return environment.vttop + "/config/mycnf/master_mysql80.cnf"
# Map of registered MysqlFlavor classes (keyed by an identifier).
# Each value is a dict {"cls": MysqlFlavor subclass, "env": str} —
# see register_flavor().
flavor_map = {}

# Singleton MysqlFlavor instance, created by set_mysql_flavor().
MYSQL_FLAVOR = None


# mysql_flavor is a function because we need something to import before the
# variable MYSQL_FLAVOR is initialized, since that doesn't happen until after
# the command-line options are parsed. If we make mysql_flavor a variable and
# import it before it's initialized, the module that imported it won't get the
# updated value when it's later initialized.
def mysql_flavor():
  # Returns the current MysqlFlavor instance (None until set).
  return MYSQL_FLAVOR
def set_mysql_flavor(flavor):
  """Set the object that will be returned by mysql_flavor().

  If flavor is not specified, set it based on MYSQL_FLAVOR environment
  variable.

  Args:
    flavor: String of the MySQL flavor e.g. "MariaDB" or "MySQL56".
  """
  global MYSQL_FLAVOR

  if not flavor:
    flavor = os.environ.get("MYSQL_FLAVOR", "MariaDB")
    # The environment variable might be set, but equal to "".
    if not flavor:
      flavor = "MariaDB"

  v = flavor_map.get(flavor, None)
  if not v:
    logging.error("Unknown MYSQL_FLAVOR '%s'", flavor)
    # sys.exit instead of the site-provided exit(), which is not
    # guaranteed to exist (e.g. under python -S or when frozen).
    sys.exit(1)

  cls = v["cls"]
  env = v["env"]
  MYSQL_FLAVOR = cls()

  # Set the environment variable explicitly in case we're overriding it via
  # command-line flag.
  os.environ["MYSQL_FLAVOR"] = env

  logging.debug("Using MySQL flavor: %s, setting MYSQL_FLAVOR=%s (%s)",
                str(flavor), env, cls)
def register_flavor(flavor, cls, env):
  """Register the available MySQL flavors.

  Note: We need the 'env' argument because our internal implementation is
  similar to 'MariaDB' (and hence requires MYSQL_FLAVOR=MariaDB) but has its
  own flavor class.

  Args:
    flavor: Name of the flavor (must be passed to test flag --mysql-flavor).
    cls: Class which inherits MysqlFlavor and provides the implementation.
    env: Value which will be used for the environment variable MYSQL_FLAVOR.
  """
  if flavor in flavor_map:
    old_cls = flavor_map[flavor]["cls"]
    old_env = flavor_map[flavor]["env"]
    logging.error("Cannot register MySQL flavor %s because class %s (env: %s)"
                  " is already registered for it.", flavor, old_cls, old_env)
    # sys.exit instead of the site-provided exit(), which is not
    # guaranteed to exist (e.g. under python -S or when frozen).
    sys.exit(1)
  flavor_map[flavor] = {"cls": cls, "env": env}
# Register all built-in flavors so --mysql-flavor can select them by name.
register_flavor("MariaDB", MariaDB, "MariaDB")
register_flavor("MariaDB103", MariaDB103, "MariaDB103")
register_flavor("MySQL56", MySQL56, "MySQL56")
register_flavor("MySQL80", MySQL80, "MySQL80")
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains MNIST by feeding data using a single worker.
This version is based on:
https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
but adds support for training and prediction on the Google Cloud ML service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import mnist
from tensorflow.python.lib.io import file_io
# Copy of tensorflow.examples.tutorials.mnist.input_data but includes
# support for keys.
from trainer import input_data
# Basic model parameters as external flags.
# Each flag can be overridden on the command line, e.g. --learning_rate=0.05.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_string('model_dir', 'data', 'Directory to put the model into.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')
def placeholder_inputs():
  """Build the input placeholders for the MNIST graph.

  The leading (batch) dimension of every placeholder is left as None so the
  same graph can be fed fixed-size training batches as well as a variable
  number of instances during online prediction.

  Returns:
    A (keys, images, labels) tuple of placeholders.
  """
  keys = tf.placeholder(tf.int64, shape=(None,))
  images = tf.placeholder(tf.float32, shape=(None, mnist.IMAGE_PIXELS))
  labels = tf.placeholder(tf.int32, shape=(None,))
  return keys, images, labels
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Build the feed_dict for one training step.

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    A dict mapping each placeholder to the next batch of values.
  """
  # Pull the next `batch_size` examples; the keys element is unused during
  # training.
  _, image_batch, label_batch = data_set.next_batch(FLAGS.batch_size,
                                                    FLAGS.fake_data)
  return {images_pl: image_batch, labels_pl: label_batch}
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Run one full epoch of evaluation and print the precision.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  # Only whole batches are evaluated, so the example count is rounded down
  # to a multiple of the batch size.
  num_examples = steps_per_epoch * FLAGS.batch_size
  true_count = 0  # running tally of correct predictions
  for _ in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
def run_training():
  """Train MNIST for FLAGS.max_steps steps and export the model.

  Builds the graph (inference, loss, training and eval ops), registers the
  'inputs'/'outputs' collections used by the Cloud ML prediction service,
  runs the training loop with periodic summaries, checkpoints and
  evaluations, then saves the trained model to FLAGS.model_dir.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(tempfile.mkdtemp(), FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels and mark as input.
    placeholders = placeholder_inputs()
    keys_placeholder, images_placeholder, labels_placeholder = placeholders
    # Record the tensor names of the serving inputs so the prediction
    # service can map request fields onto graph tensors.
    inputs = {'key': keys_placeholder.name, 'image': images_placeholder.name}
    tf.add_to_collection('inputs', json.dumps(inputs))
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # To be able to extract the id, we need to add the identity function.
    keys = tf.identity(keys_placeholder)
    # The prediction will be the index in logits with the highest score.
    # We also use a softmax operation to produce a probability distribution
    # over all possible digits.
    prediction = tf.argmax(logits, 1)
    scores = tf.nn.softmax(logits)
    # Mark the outputs.
    outputs = {'key': keys.name,
               'prediction': prediction.name,
               'scores': scores.name}
    tf.add_to_collection('outputs', json.dumps(outputs))
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary operation based on the TF collection of Summaries.
    # NOTE(review): merge_all_summaries/initialize_all_variables are the
    # TF r0.10-era API this file targets; do not modernize piecemeal.
    summary_op = tf.merge_all_summaries()
    # Add the variable initializer Op.
    init = tf.initialize_all_variables()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    sess.run(init)
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
    # Export the model so that it can be loaded and used later for predictions.
    file_io.create_dir(FLAGS.model_dir)
    saver.save(sess, os.path.join(FLAGS.model_dir, 'export'))
def main(_):
  # Entry point invoked by tf.app.run() after flag parsing; the leftover
  # argv argument is unused.
  run_training()
if __name__ == '__main__':
  tf.app.run()
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# lots of nasty hacks to get ids out of namespaces
#
# Nick -- I understand your point very well now...
#
from utils import valid_idname, valid_guid
from error import MQLParseError, MQLInternalError
from namespace import NameMap
from pymql.error import EmptyResult
from pymql.log import LOG
import mid
class NamespaceFactory:
  """Resolves human-readable ids to graph guids and back.

  Wraps a namespace.NameMap with per-factory caches plus a pile of
  heuristics for choosing the "best" id for a guid (best_hrid, shortest
  namespace path, /en short-circuit, etc.).  See lookup_ids() and
  lookup_id_query() for the details.
  """

  def __init__(self, querier):
    self.querier = querier
    # guid -> id cache, filled by lookup_ids()/lookup_id_internal().
    self.guids = {}
    # id -> guid cache (cleared by flush(); not written in this class).
    self.ids = {}
    self.namemap = NameMap(self.querier.gc)
    # Guids needed by the id-search heuristics; resolved lazily in preload().
    self.topic_en = None
    self.best_hrid_guid = None
    self.forbidden_namespaces = ()

  def flush(self):
    """
    Completely empty the caches.
    This takes care of flushing namespace.py caches as well.
    """
    self.guids = {}
    self.ids = {}
    self.namemap.flush()

  def preload(self, varenv):
    """Resolve and cache the guids that the id-search heuristics rely on."""
    # load stuff that we know we will need later...
    if not self.topic_en:
      # it may be set to False after this...
      self.topic_en = self.lookup_guid("/en", varenv)
    if not self.best_hrid_guid:
      g = self.lookup_guid("/freebase/object_hints/best_hrid", varenv)
      if not isinstance(g, basestring) or g[0] != "#":
        raise MQLInternalError({}, "No /freebase/object_hints/best_hrid")
      self.best_hrid_guid = g
    if not self.forbidden_namespaces:
      self.forbidden_namespaces = \
          self.lookup_guids(("/wikipedia/en",
                             "/wikipedia/en_id",
                             "/wikipedia/de",
                             "/user/metaweb/datasource",
                             "/user/alecf/video_games",
                             "/user/avh/ellerdale",
                             "/base/zxspectrum/wos",
                             "/base/ourairports/ourairports_id",
                             "/authority",
                             "/source"),varenv).values()

  # this is the only part of low JSON that needs to resolve names.
  def lookup_guid(self, name, varenv):
    """Return the guid for a single mid/guid/idname, or False if absent."""
    # check that we can't resolve it internally.
    if name.startswith("/m/"):
      guids = self.lookup_guids_of_mids([name], varenv)
      return guids.get(name, False)
    id_map = self.internal_lookup_checks([name])
    if id_map[name] is not None:
      return id_map[name]
    return self.namemap.lookup(name, varenv)

  def internal_lookup_checks(self, id_list):
    """Validate each id; guid-like ids resolve immediately, idnames map to
    None (meaning "still needs a namespace lookup")."""
    # we always use adorned guids here, except at the
    # very last stage in QueryPrimitive when
    # we generate the actual graph query itself.
    retval = {}
    for name in id_list:
      if isinstance(name, unicode):
        name = name.encode("utf-8")
      if not isinstance(name, str):
        raise MQLParseError(
            None, "didn't understand '%(id)s' as an id", id=str(name))
      # this implies raw guids are legal in :type and @id fields.
      # perhaps only the last is true. Perhaps not even that is true.
      elif valid_guid(name):
        retval[name] = name
      elif name.find("/guid/") == 0:
        retval[name] = self.internal_id_to_guid(name)
      elif valid_idname(name):
        retval[name] = None
      else:
        raise MQLParseError(None, "'%(id)s' is not a valid guid or id", id=name)
    return retval

  def internal_id_to_guid(self, id):
    """Convert a '/guid/...' id to an adorned '#...' guid; False if it is
    well-formed but not a guid; raises MQLParseError otherwise."""
    # all we know going in is that we start /guid/
    if id.find("/guid/") == 0 and len(id) == 38 and valid_guid("#" + id[6:]):
      return "#" + id[6:]
    elif valid_idname(id):
      # well, it isn't a guid, but it's well formed so it just doesn't exist
      return False
    else:
      # it's a mess
      raise MQLParseError(None, "'%(id)s' is not a valid guid or id", id=id)

  def internal_guid_to_id(self, guid):
    """Inverse of internal_id_to_guid() for a known-good adorned guid."""
    # we only call this on a guid known to be OK
    return "/guid/" + guid[1:]

  def lookup_guids(self, id_list, varenv):
    """Return id -> guid for a list of mids/guids/idnames."""
    # slightly hacky way to split out the things we can resolve here from the real idnames.
    id_map = self.internal_lookup_checks(id_list)
    mids = [m for m in id_list if m.startswith("/m/")]
    if mids:
      id_map.update(self.lookup_guids_of_mids(mids, varenv))
    varenv["gr_log_code"] = "id2guid"
    next_step = [id for id in id_map if id_map[id] is None]
    lookup_map = self.namemap.lookup_multiple(next_step, varenv)
    id_map.update(lookup_map)
    varenv.pop("gr_log_code")
    return id_map

  # nasty hacky function which contains "best available" namespace resolution.
  def lookup_id_internal(self, guid, varenv):
    """Cached single-guid id resolution via lookup_id_query()."""
    if guid is None:
      return None
    elif guid in self.guids:
      return self.guids[guid]
    else:
      found_id = self.lookup_id_query(guid, varenv)
      self.guids[guid] = found_id
      return found_id

  # eek. see https://wiki.metaweb.com/index.php/Machine_IDs
  def lookup_mids_of_guids(self, guid_list, varenv):
    """Return guid -> [mids]; the guid's own computed mid first, then mids
    of nodes whose replaced_by link points at it."""
    # It's..sort of the same as before. We have some guids,
    # see if any of them are replaced_by.
    # If they are,
    if not guid_list:
      return {}
    ask_list = set()
    result = {}
    rev = {}
    for g in guid_list:
      # convert the mid directly.
      m = mid.of_guid(g[1:])
      ask_list.add(g)
      result[g] = [m]
      rev[m] = g
    LOG.debug("mql.lookup.mids", "Looking up mids for guids")
    # we look foward, up replaced_by links, and from that node
    # to other replaced_by links,
    # and backwards from the root, for previous ones.
    #   +-+  r.b.  +-+
    #   |A| ------> |B|
    #   +-+         +-+
    #                |
    #   +-+          |
    #   |C|----------+
    #   +-+
    #
    # in this diagram, we root at B.
    # We list B first but also A and C if present.
    query = [{
        "@guid": ask_list,
        "@pagesize": len(ask_list) + 1,
        "-replaced_by": [{
            "@guid": None,
            ":optional": True
        }]
    }]
    varenv["gr_log_code"] = "mids2guids"
    query_results = self.querier.read(query, varenv)
    varenv.pop("gr_log_code")
    # each result is going to (hopefully) either have a -replaced_by link
    # or a replaced_by one.
    for item in query_results:
      guid = item["@guid"]
      # otherwise, theres just links pointing at me.
      if item["-replaced_by"]:
        # me first
        result[guid] = [mid.of_guid(guid[1:])]
        # then everyone else
        for r in item["-replaced_by"]:
          result[guid].append(mid.of_guid(r["@guid"][1:]))
    return result

  def lookup_guids_of_mids(self, mid_list, varenv):
    """Return mid -> guid, following replaced_by links; False for mids
    that do not decode to an existing guid."""
    ask_list = set()
    result = {}
    rev = {}
    # arithmetically compute guids
    for m in mid_list:
      try:
        guid = "#" + mid.to_guid(m)
        ask_list.add(guid)
        # store the whole list here, down below we'll just
        # overwrite the things we got back.
        result[m] = guid #self.internal_guid_to_id(guid)
        # i need to go back + forth.
        rev[guid] = m
      except (mid.InvalidMIDVersion, mid.InvalidMID) as e:
        result[m] = False
      except (mid.InvalidMunch) as e:
        raise MQLParseError(
            None, "'%(mid)s' is not a properly formatted mid", mid=m)
    if not len(ask_list):
      return result
    # i'm not caching these.
    LOG.debug(
        "mql.resolve.mids", "Looking up guids for mids", code=len(ask_list))
    # look for replaced by links off the guids
    # replaced_by links are unique, if they arent then this will signify some
    # end-of-the-world type event.
    query = [{"@guid": ask_list, "replaced_by": {"@guid": None}}]
    # read
    varenv["gr_log_code"] = "guids2mids"
    query_results = self.querier.read(query, varenv)
    varenv.pop("gr_log_code")
    # "now see what we found out..."
    for item in query_results:
      # [guid, replaced_by { guid }]
      guid = item["@guid"]
      rep_by = item["replaced_by"]["@guid"]
      m = rev[guid]
      result[m] = rep_by
    # pray.
    return result

  def lookup_id(self, guid, varenv):
    """Return the id for a single guid; see lookup_ids() for semantics."""
    # this function needs to have exactly the same semantics as
    # lookup_ids() (which now contains the "official" semantics)
    if isinstance(guid, unicode):
      guid = guid.encode("utf-8")
    return self.lookup_ids([guid], varenv)[guid]

  def lookup_ids(self, guid_list, varenv):
    """
    Given a list of guids returns an id for each one,
    using as few queries as possible.
    Returns a dictionary of guid->id.
    """
    ask_list = set()
    result = {}
    if not "asof" in varenv:
      # Step 1: maybe we already know.
      for guid in guid_list:
        if isinstance(guid, unicode):
          guid = guid.encode("utf-8")
        if guid in self.guids:
          LOG.debug(
              "mql.lookup.id.cached",
              "found %s in cache" % guid,
              value=self.guids[guid])
          result[guid] = self.guids[guid]
        elif guid not in ask_list:
          ask_list.add(guid)
      # don't cache pathologically large result sets
      cache = len(ask_list) < 10000
    else:
      # "asof" queries see historical state, so the cache must be bypassed
      # in both directions.
      for guid in guid_list:
        if isinstance(guid, unicode):
          guid = guid.encode("utf-8")
        ask_list.add(guid)
      cache = False
    if not ask_list:
      return result
    LOG.debug("mql.lookup.ids", "Lookup ids", code=len(ask_list))
    self.preload(varenv)
    # Step 2: resolve the ask_list
    # The nested -has_key blocks walk up to three namespace levels in one
    # graph query; search_id_result() does a BFS over the result.
    query = [{
        "@guid": ask_list,
        "@pagesize": len(ask_list) + 1,
        "best_hrid": [{
            ":typeguid": self.best_hrid_guid,
            ":value": None,
            ":optional": True,
        }],
        "-has_key": [{
            ":value":
                None,
            ":optional":
                True,
            ":comparator":
                "octet",
            ":pagesize":
                1000,
            "@guid":
                None,
            "-has_key": [{
                ":value":
                    None,
                ":optional":
                    True,
                ":comparator":
                    "octet",
                "@guid":
                    None,
                "-has_key": [{
                    ":value": None,
                    ":optional": True,
                    ":comparator": "octet",
                    "@guid": None,
                }]
            }]
        }],
        "is_instance_of": {
            "@id": "/type/namespace",
            ":optional": True
        }
    }]
    varenv["gr_log_code"] = "guid2id"
    query_results = self.querier.read(query, varenv)
    varenv.pop("gr_log_code")
    LOG.debug("mql.lookup.id.results", "", results=query_results)
    # now see what we found out...
    # these should be cached.
    leftover_guids = []
    for item in query_results:
      res = self.search_id_result(item, varenv)
      if res:
        result[item["@guid"]] = res
        if cache:
          self.guids[item["@guid"]] = res
    # every guid in guid_list has to be present in the result.
    for guid in guid_list:
      if guid not in result:
        LOG.debug("mql.lookup.id.notfound", "midifying %s" % guid)
        result[guid] = mid.of_guid(guid[1:])
    return result

  def search_id_result(self, head, varenv):
    """
    take the id result struct and attempt to produce an id.
    Here are the rules:
    - best_hrid is chosen if present
    - the shortest name is best
    - except that any three level name is better than a /boot name.
    - among names of the same length, pick any one at random.
    """
    hrids = head["best_hrid"]
    if hrids:
      if len(hrids) > 1:
        # This should never happen.
        # If it does, log an error but don't fail.
        LOG.error("mql.resolve.best_hrid",
                  "multiple /freebase/object_hints/best_hrid")
      hrid = hrids[0][":value"]
      return hrid
    # bfs_list format is an array of
    # ( value, parent, guid, keys, depth )
    bfs_list = [(None, None, head["@guid"], head.get("-has_key", []), 0)]
    root = self.namemap.bootstrap.root_namespace
    boot = self.namemap.bootstrap.boot
    is_namespace = False
    if isinstance(head["is_instance_of"], dict):
      is_namespace = True
    has_boot = None
    if head["@guid"] == root:
      return "/"
    elif head["@guid"] == boot:
      return "/boot"
    while bfs_list:
      front = bfs_list.pop(0)
      for item in front[3]:
        bfs_item = (item[":value"], front, item["@guid"],
                    item.get("-has_key", []), front[4] + 1)
        if bfs_item[2] == root:
          # we're done - what are we called?
          rv = []
          pos = bfs_item
          while pos[1]:
            rv.append(pos[0])
            pos = pos[1]
          return "/" + "/".join(rv)
        elif bfs_item[2] == boot:
          has_boot = bfs_item
        elif (self.topic_en and bfs_item[2] == self.topic_en and
              bfs_item[4] == 1):
          # hack for things *directly* in /en to short circuit early...
          return "/en/" + bfs_item[0]
        elif not is_namespace and bfs_item[2] in self.forbidden_namespaces:
          # terminate recursion at /wikipedia/en etc.
          pass
        else:
          bfs_list.append(bfs_item)
    # are we in /boot?
    if has_boot and has_boot[4] == 1:
      return "/boot/" + has_boot[0]
    # ok, we've searched the entire list. front is the last item...
    # try a regular lookup_id() on it. (so we can cache it too!)
    if front[4] == 3:
      leading_id = self.lookup_id_internal(front[2], varenv)
      if leading_id and leading_id[0] == "/":
        # we got something...
        rv = [leading_id]
        pos = front
        while pos[1]:
          rv.append(pos[0])
          pos = pos[1]
        return "/".join(rv)
    # failure
    return None

  def lookup_id_query(self, guid, varenv):
    """
    lots of nasty heuristics to find ids for id-identified objects:
    This seems to be called when lookup_ids can't finish the job,
    but it also seems to duplicate some of the logic there.
    Current rules:
    - do we have a /freebase/object_hints/best_hrid property
    - do we have a name in /
    - do we have a name in /XXX/YYY (XXX not boot or pub)
    - do we have a name in /XXX/YYY/ZZZ
    - do we have a name in /XXX/YYY (XXX may be boot or pub)
    - ask namespace.lookup_by_guid_oneoff...
    All of this trouble is mostly because some things (/type/object/type
    being the best example) have names in /bootstrap-namespace
    that we dont want to expose by accident.
    """
    query = {
        "@guid":
            guid,
        "best_hrid": [{
            ":typeguid": self.best_hrid_guid,
            ":value": None,
            ":optional": True,
        }],
        "has_root_name": [{
            ":type": "has_key",
            ":comparator": "octet",
            ":reverse": True,
            ":value": None,
            ":optional": True,
            "@id": "/"
        }],
        "has_2_level_name": [{
            ":type": "has_key",
            ":comparator": "octet",
            ":reverse": True,
            ":value": None,
            ":optional": True,
            "-has_key": [{
                ":comparator": "octet",
                ":value": None,
                "@id": "/"
            }]
        }],
        "has_3_level_name": [{
            ":type":
                "has_key",
            ":comparator":
                "octet",
            ":reverse":
                True,
            ":value":
                None,
            ":optional":
                True,
            "-has_key": [{
                ":comparator":
                    "octet",
                ":value":
                    None,
                "-has_key": [{
                    ":comparator": "octet",
                    ":value": None,
                    "@id": "/"
                }]
            }]
        }],
    }
    try:
      varenv["gr_log_code"] = "guid2id"
      result = self.querier.read(query, varenv)
      varenv.pop("gr_log_code")
    except EmptyResult:
      # everything was optional so we must not have found the guid itself
      # this code is unnecessary, but has key documentation value.
      raise
    # we may get nothing back if the guid has been deleted (or if we were deleting it)
    # in that case, just return the guid.
    if result is None:
      return guid
    idname = None
    hrids = result["best_hrid"]
    if hrids:
      if len(hrids) > 1:
        # This should never happen.
        # If it does, log an error but don't fail.
        LOG.error("mql.resolve.lookup_id_internal",
                  "multiple /freebase/object_hints/best_hrid")
      hrid = hrids[0][":value"]
      return hrid
    if result["has_root_name"]:
      idname = "/" + result["has_root_name"][0][":value"]
    elif (
        result["has_2_level_name"] and
        result["has_2_level_name"][0]["-has_key"][0][":value"] not in ("boot",
                                                                      "pub")):
      idname = "/" + result["has_2_level_name"][0]["-has_key"][0][
          ":value"] + "/" + result["has_2_level_name"][0][":value"]
    elif result["has_3_level_name"]:
      idname = (
          "/" +
          result["has_3_level_name"][0]["-has_key"][0]["-has_key"][0][":value"]
          + "/" + result["has_3_level_name"][0]["-has_key"][0][":value"] + "/" +
          result["has_3_level_name"][0][":value"])
    elif result["has_2_level_name"]:
      idname = "/" + result["has_2_level_name"][0]["-has_key"][0][
          ":value"] + "/" + result["has_2_level_name"][0][":value"]
    else:
      idname = self.namemap.lookup_by_guid_oneoff(guid, varenv)
    # special hack for the root namespace
    if idname == "/boot/root_namespace":
      return "/"
    elif idname == "/boot/root_user":
      return "/user/root"
    elif idname is not None and valid_idname(idname):
      return idname
    else:
      return guid
| |
#
# io_rgb.py -- RGB image file handling.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, time
import os
import numpy
import mimetypes
import hashlib
from io import BytesIO
from ginga.util import paths
from ginga.util.six.moves import map, zip
try:
# do we have Python Imaging Library available?
import PIL.Image as PILimage
from PIL.ExifTags import TAGS
have_pil = True
except ImportError:
have_pil = False
# We only need one of { have_pilutil, have_qtimage }, but both have
# their strengths
have_pilutil = False
have_qtimage = False
try:
from scipy.misc import imresize, imsave, toimage, fromimage
have_pilutil = True
except ImportError:
pass
# Qt can be used as a replacement for PIL
if not have_pilutil:
try:
from ginga.qtw.QtHelp import QImage, QColor, QtCore
have_qtimage = True
except ImportError as e:
pass
# EXIF library for getting metadata, in the case that we don't have PIL
try:
import EXIF
have_exif = True
except ImportError:
have_exif = False
# How about color management (ICC profile) support?
try:
import PIL.ImageCms as ImageCms
have_cms = True
except ImportError:
have_cms = False
basedir = paths.ginga_home
# Color Management configuration
profile = {}
for filename in ('working.icc', 'monitor.icc', 'sRGB.icc', 'AdobeRGB.icc'):
profname, ext = os.path.splitext(filename)
profile[profname] = os.path.join(basedir, "profiles", filename)
rendering_intent = 0
# Prepare common transforms
transform = {}
# Build transforms for profile conversions for which we have profiles
if have_cms:
rendering_intent = ImageCms.INTENT_PERCEPTUAL
for inprof, outprof in [('sRGB', 'working'), ('AdobeRGB', 'working'), ('working', 'monitor')]:
if os.path.exists(profile[inprof]) and os.path.exists(profile[outprof]):
transform[(inprof, outprof)] = ImageCms.buildTransform(profile[inprof],
profile[outprof],
'RGB', 'RGB',
renderingIntent=rendering_intent,
flags=0)
# For testing...
#have_qtimage = False
#have_pilutil = False
#have_pil = False
#have_cms = False
class RGBFileHandler(object):
    """Load, save and scale RGB image files (JPEG, PNG, PPM/PGM, ...).

    Dispatches to whichever backend was detected at import time: the
    built-in PPM reader, PIL (preferred; also supplies EXIF metadata and
    ICC color-profile conversion via the module-level `profile` and
    `transform` tables), or Qt's QImage as a fallback.
    """

    def __init__(self, logger):
        self.logger = logger

    def load_file(self, filepath, header):
        """Load `filepath`; EXIF keywords (when readable) go into `header`."""
        return self._imload(filepath, header)

    def save_file_as(self, filepath, data_np, header):
        """Save numpy image `data_np` to `filepath` (requires PIL)."""
        if not have_pil:
            raise ImageError("Install PIL to be able to save images")
        # TODO: save keyword metadata!
        # NOTE(review): imsave comes from scipy.misc (have_pilutil), not PIL
        # itself -- confirm scipy is available whenever have_pil is True.
        imsave(filepath, data_np)

    def _imload(self, filepath, kwds):
        """Load an image file, guessing the format, and return a numpy
        array containing an RGB image.  If EXIF keywords can be read
        they are returned in the dict _kwds_.
        """
        start_time = time.time()
        typ, enc = mimetypes.guess_type(filepath)
        if not typ:
            # Fall back to JPEG when the MIME type cannot be guessed.
            typ = 'image/jpeg'
        typ, subtyp = typ.split('/')
        self.logger.debug("MIME type is %s/%s" % (typ, subtyp))
        if (typ == 'image') and (subtyp in ('x-portable-pixmap',
                                            'x-portable-greymap')):
            # Special opener for PPM files, preserves high bit depth
            means = 'built-in'
            data_np = open_ppm(filepath)
        elif have_pil:
            # PIL seems to be the faster loader than QImage, and can
            # return EXIF info, where QImage will not.
            means = 'PIL'
            image = PILimage.open(filepath)
            try:
                # _getexif() may return None or raise for formats without
                # EXIF data; either way we just log and continue.
                info = image._getexif()
                for tag, value in info.items():
                    kwd = TAGS.get(tag, tag)
                    kwds[kwd] = value
            except Exception as e:
                self.logger.warn("Failed to get image metadata: %s" % (str(e)))
            # If we have a working color profile then handle any embedded
            # profile or color space information, if possible
            if have_cms and os.path.exists(profile['working']):
                # Assume sRGB image, unless we learn to the contrary
                in_profile = 'sRGB'
                try:
                    if 'icc_profile' in image.info:
                        self.logger.debug("image has embedded color profile")
                        buf_profile = image.info['icc_profile']
                        # Write out embedded profile (if needed)
                        prof_md5 = hashlib.md5(buf_profile).hexdigest()
                        in_profile = "/tmp/_image_%d_%s.icc" % (
                            os.getpid(), prof_md5)
                        if not os.path.exists(in_profile):
                            # NOTE(review): profile bytes are written in text
                            # mode; under Python 3 this should be 'wb' --
                            # confirm target interpreter.
                            with open(in_profile, 'w') as icc_f:
                                icc_f.write(buf_profile)
                    # see if there is any EXIF tag about the colorspace
                    elif 'ColorSpace' in kwds:
                        csp = kwds['ColorSpace']
                        iop = kwds.get('InteroperabilityIndex', None)
                        if (csp == 0x2) or (csp == 0xffff):
                            # NOTE: 0xffff is really "undefined" and should be
                            # combined with a test of EXIF tag 0x0001
                            # ('InteropIndex') == 'R03', but PIL _getexif()
                            # does not return the InteropIndex
                            in_profile = 'AdobeRGB'
                            self.logger.debug("hmm..this looks like an AdobeRGB image")
                        elif csp == 0x1:
                            self.logger.debug("hmm..this looks like a sRGB image")
                            in_profile = 'sRGB'
                        else:
                            self.logger.debug("no color space metadata, assuming this is an sRGB image")
                    # if we have a valid profile, try the conversion
                    tr_key = (in_profile, 'working')
                    if tr_key in transform:
                        # We have am in-core transform already for this (faster)
                        image = convert_profile_pil_transform(image, transform[tr_key],
                                                              inPlace=True)
                    else:
                        # Convert using profiles on disk (slower)
                        if in_profile in profile:
                            in_profile = profile[in_profile]
                        image = convert_profile_pil(image, in_profile,
                                                    profile['working'])
                    self.logger.info("converted from profile (%s) to profile (%s)" % (
                        in_profile, profile['working']))
                except Exception as e:
                    self.logger.error("Error converting from embedded color profile: %s" % (str(e)))
                    self.logger.warn("Leaving image unprofiled.")
            data_np = numpy.array(image)
        elif have_qtimage:
            # QImage doesn't give EXIF info, so use 3rd-party lib if available
            if have_exif:
                with open(filepath, 'rb') as in_f:
                    d = EXIF.process_file(in_f)
                kwds.update(d)
            means = 'QImage'
            qimage = QImage()
            qimage.load(filepath)
            data_np = qimage2numpy(qimage)
        else:
            raise ImageError("No way to load image format '%s/%s'" % (
                typ, subtyp))
        end_time = time.time()
        self.logger.debug("loading (%s) time %.4f sec" % (
            means, end_time - start_time))
        return data_np

    def imload(self, filepath, kwds):
        """Public alias for _imload(); see that method for details."""
        return self._imload(filepath, kwds)

    def get_thumb(self, filepath):
        """Return the embedded EXIF JPEG thumbnail of `filepath` as a numpy
        array, or None if the file has no thumbnail or EXIF parsing fails.
        Raises if PIL or the EXIF library is unavailable.
        """
        if not have_pil:
            raise Exception("Install PIL to use this method")
        if not have_exif:
            raise Exception("Install EXIF to use this method")
        with open(filepath, 'rb') as in_f:
            try:
                d = EXIF.process_file(in_f)
            except Exception as e:
                return None
        if 'JPEGThumbnail' in d:
            buf = d['JPEGThumbnail']
        # TODO: other possible encodings?
        else:
            return None
        # BUG FIX: BytesIO is the class imported from io, not a module;
        # the previous code called BytesIO.BytesIO(buf), which raised
        # AttributeError before the thumbnail could be decoded.
        image = PILimage.open(BytesIO(buf))
        data_np = numpy.array(image)
        return data_np

    def get_buffer(self, data_np, header, format, output=None):
        """Get image as a buffer in (format).
        Format should be 'jpeg', 'png', etc.
        """
        if not have_pil:
            raise Exception("Install PIL to use this method")
        image = PILimage.fromarray(data_np)
        buf = output
        if buf is None:
            buf = BytesIO()
        image.save(buf, format)
        contents = buf.getvalue()
        if output is None:
            buf.close()
        return contents

    def imresize(self, data, new_wd, new_ht, method='bilinear'):
        """Scale an image in numpy array _data_ to the specified width and
        height.  A smooth scaling is preferred.
        """
        old_ht, old_wd = data.shape[:2]
        start_time = time.time()
        if have_qtimage:
            # QImage method is slightly faster and gives a smoother looking
            # result than PIL
            means = 'QImage'
            qimage = numpy2qimage(data)
            if (old_wd != new_wd) or (old_ht != new_ht):
                # NOTE: there is a strange bug in qimage.scaled if the new
                # dimensions are exactly the same--so we check and only
                # scale if there is some difference
                qimage = qimage.scaled(new_wd, new_ht,
                                       transformMode=QtCore.Qt.SmoothTransformation)
                newdata = qimage2numpy(qimage)
            else:
                newdata = data
        elif have_pilutil:
            means = 'PIL'
            zoom_x = float(new_wd) / float(old_wd)
            zoom_y = float(new_ht) / float(old_ht)
            if (old_wd >= new_wd) or (old_ht >= new_ht):
                # data size is bigger, skip pixels
                zoom = max(zoom_x, zoom_y)
            else:
                zoom = min(zoom_x, zoom_y)
            # module-level scipy.misc.imresize, not this method
            newdata = imresize(data, zoom, interp=method)
        else:
            raise ImageError("No way to scale image smoothly")
        end_time = time.time()
        self.logger.debug("scaling (%s) time %.4f sec" % (
            means, end_time - start_time))
        return newdata
# UTILITY FUNCTIONS
def open_ppm(filepath):
    """Read a binary PGM ('P5') or PPM ('P6') file.

    Returns a numpy array of shape (height, width) for PGM or
    (height, width, 3) for PPM; dtype is uint8 for maxval <= 255, else
    uint16.  Multi-byte samples are stored big-endian in the file, so
    they are byte-swapped on little-endian hosts.

    Raises:
        ValueError: for an unsupported magic number or maxval.
    """
    # Fixes vs. previous version: the file is now closed via a context
    # manager, and unsupported magic numbers / maxvals raise ValueError
    # instead of leaving `depth`/`dtype` unbound (NameError).
    with open(filepath, 'rb') as infile:
        # Magic number selects the number of channels.
        ptype = infile.readline().strip().upper()
        if ptype == b'P5':
            depth = 1
        elif ptype == b'P6':
            depth = 3
        else:
            raise ValueError("Unsupported PPM/PGM magic number: %r" % (ptype,))
        # Skip comment and blank lines before the dimension line.
        header = infile.readline().strip()
        while header.startswith(b'#') or len(header) == 0:
            header = infile.readline().strip()
        width, height = tuple(map(int, header.split()))
        # Maximum sample value determines the sample width.
        maxval = int(infile.readline())
        if maxval <= 255:
            dtype = numpy.uint8
        elif maxval <= 65535:
            dtype = numpy.uint16
        else:
            raise ValueError("Unsupported PPM/PGM maxval: %d" % maxval)
        # The rest of the file is raw sample data.
        flat = numpy.fromfile(infile, dtype=dtype)
    if depth > 1:
        arr = flat.reshape((height, width, depth))
    else:
        arr = flat.reshape((height, width))
    # File samples are big-endian; swap on little-endian hosts (no-op for
    # uint8 data).
    if sys.byteorder == 'little':
        arr = arr.byteswap()
    return arr
# --- Credit ---
# the following function set by Hans Meine was found here:
# http://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
#
# see also a newer version at
# http://kogs-www.informatik.uni-hamburg.de/~meine/software/qimage2ndarray/
#
def qimage2numpy(qimage):
    """Convert QImage to numpy.ndarray.

    Only 32-bit (A)RGB QImage formats are supported; returns a float
    array of shape (height, width, 3) in R, G, B channel order.
    """
    #print "FORMAT IS %s" % str(qimage.format())
    result_shape = (qimage.height(), qimage.width())
    # Rows can be padded to an alignment boundary, so derive the stored
    # per-row pixel count from bytesPerLine rather than from width.
    # NOTE(review): this relies on Python 2 integer division; under
    # Python 3 '/' yields a float and reshape would fail -- confirm
    # target interpreter before porting.
    temp_shape = (qimage.height(),
                  qimage.bytesPerLine() * 8 / qimage.depth())
    if qimage.format() in (QImage.Format_ARGB32_Premultiplied,
                           QImage.Format_ARGB32,
                           QImage.Format_RGB32):
        dtype = numpy.uint8
        result_shape += (4, )
        temp_shape += (4, )
    else:
        raise ValueError("qimage2numpy only supports 32bit and 8bit images")
    # FIXME: raise error if alignment does not match
    buf = qimage.bits()
    if hasattr(buf, 'asstring'):
        # Qt4
        buf = bytes(buf.asstring(qimage.numBytes()))
    else:
        # PySide
        buf = bytes(buf)
    result = numpy.frombuffer(buf, dtype).reshape(temp_shape)
    if result_shape != temp_shape:
        # Trim the row padding introduced by bytesPerLine alignment.
        result = result[:,:result_shape[1]]
    # QImage loads the image as BGRA, we want RGB
    #res = numpy.dstack((result[:, :, 2], result[:, :, 1], result[:, :, 0]))
    res = numpy.empty((qimage.height(), qimage.width(), 3))
    res[:, :, 0] = result[:, :, 2]
    res[:, :, 1] = result[:, :, 1]
    res[:, :, 2] = result[:, :, 0]
    return res
def numpy2qimage(array):
    """Dispatch a 2D (grayscale) or 3D (RGB/RGBA) array to the matching
    QImage converter."""
    ndim = numpy.ndim(array)
    if ndim == 2:
        return gray2qimage(array)
    if ndim == 3:
        return rgb2qimage(array)
    raise ValueError("can only convert 2D or 3D arrays")
def gray2qimage(gray):
    """Build an 8-bit gray QImage from the 2D array `gray`.

    The first axis is the vertical image axis.  The returned QImage keeps
    a reference to the backing numpy array in its `ndarray` attribute: on
    Windows the QPixmap conversion does not copy pixel data, so the QImage
    (and hence the buffer) must stay alive while the pixmap is in use,
    otherwise the underlying memory is freed.
    """
    if len(gray.shape) != 2:
        raise ValueError("gray2QImage can only convert 2D arrays")
    h, w = gray.shape
    bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')
    # Replicate the gray value into the B, G and R channels
    for channel in range(3):
        bgra[..., channel] = gray
    # Fully opaque alpha
    bgra[..., 3].fill(255)
    result = QImage(bgra.data, w, h, QImage.Format_RGB32)
    result.ndarray = bgra  # keep the buffer alive (see docstring)
    return result
def rgb2qimage(rgb):
    """Build a 32-bit QImage from the 3D array `rgb` (axes: y, x, channel).

    Accepts 3 (RGB) or 4 (RGBA) channels.  The returned QImage keeps a
    reference to the backing numpy array in its `ndarray` attribute: on
    Windows the QPixmap conversion does not copy pixel data, so the QImage
    must stay alive while the pixmap is in use.
    """
    if len(rgb.shape) != 3:
        raise ValueError("rgb2QImage can only convert 3D arrays")
    if rgb.shape[2] not in (3, 4):
        raise ValueError("rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels")
    h, w, channels = rgb.shape
    # Qt expects 32bit BGRA data for color images:
    bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')
    bgra[..., 0] = rgb[..., 2]
    bgra[..., 1] = rgb[..., 1]
    bgra[..., 2] = rgb[..., 0]
    if channels == 3:
        # No alpha channel supplied: fully opaque, plain RGB32 format
        bgra[..., 3].fill(255)
        fmt = QImage.Format_RGB32
    else:
        bgra[..., 3] = rgb[..., 3]
        fmt = QImage.Format_ARGB32
    result = QImage(bgra.data, w, h, fmt)
    result.ndarray = bgra  # keep the buffer alive (see docstring)
    return result
# --- end QImage to numpy conversion functions ---
# --- Color Management conversion functions ---
def convert_profile_pil(image_pil, inprof_path, outprof_path, inPlace=False):
    """Apply an ICC profile-to-profile conversion to a PIL image.

    Returns the input unchanged when CMS support is unavailable.  When
    `inPlace` is true the input image is modified and returned; otherwise
    a converted copy is returned.  Uses the module-wide rendering intent.
    """
    if not have_cms:
        return image_pil
    converted = ImageCms.profileToProfile(image_pil, inprof_path,
                                          outprof_path,
                                          renderingIntent=rendering_intent,
                                          outputMode='RGB', inPlace=inPlace,
                                          flags=0)
    return image_pil if inPlace else converted
def convert_profile_pil_transform(image_pil, transform, inPlace=False):
    """Apply a prebuilt ICC transform to a PIL image.

    No-op (returns the input) when CMS support is unavailable.
    """
    if not have_cms:
        return image_pil
    converted = ImageCms.applyTransform(image_pil, transform, inPlace)
    return image_pil if inPlace else converted
def convert_profile_numpy(image_np, inprof_path, outprof_path):
    """Profile-convert a numpy image by round-tripping through PIL.

    Returns the input unchanged when PIL utilities or CMS are missing.
    """
    if (not have_pilutil) or (not have_cms):
        return image_np
    pil_in = toimage(image_np)
    pil_out = convert_profile_pil(pil_in,
                                  inprof_path, outprof_path)
    return fromimage(pil_out)
def convert_profile_numpy_transform(image_np, transform):
    """Apply a prebuilt ICC transform to a numpy image via PIL.

    Returns the input unchanged when PIL utilities or CMS are missing.
    """
    if (not have_pilutil) or (not have_cms):
        return image_np
    pil_image = toimage(image_np)
    # in-place conversion, then back to numpy
    convert_profile_pil_transform(pil_image, transform, inPlace=True)
    return fromimage(pil_image)
def have_monitor_profile():
    """Return True when a working-space -> monitor ICC transform is loaded."""
    key = ('working', 'monitor')
    return key in transform
def convert_profile_monitor(image_np):
    """Convert a numpy image from the working profile to the monitor
    profile using the preloaded transform."""
    monitor_transform = transform[('working', 'monitor')]
    return convert_profile_numpy_transform(image_np, monitor_transform)
def set_rendering_intent(intent):
    """Set the module-wide ICC rendering intent used by the profile
    conversion helpers.

    Parameters
    ----------
    intent: integer
        0: perceptual, 1: relative colorimetric, 2: saturation,
        3: absolute colorimetric
    """
    global rendering_intent
    rendering_intent = intent
#END
| |
# -*- coding: utf-8 -*-
import os
import numpy as np
import logging
from PyMca5.PyMcaIO import ConfigDict
from ..io import localfs
from ..io import spe
from ..utils import subprocess
logger = logging.getLogger(__name__)
def proc_result(args, out, err, returncode):
    """Report the outcome of an xmimsim subprocess call.

    On failure, print the command line and any captured stderr so the
    problem shows up in the job log.  Returns True on success (exit
    code 0), False otherwise.
    """
    if returncode != 0:
        print("Failed: " + " ".join(args))
        if err:
            print("XMIMSIM errors:")
            print(err)
        return False
    return True
def installed():
    """Return whether the xmimsim executable is available on this system."""
    tool = "xmimsim"
    return subprocess.installed(tool)
def execute(args, cwd):
    """Run a command in `cwd`, decode its output and report the outcome.

    Returns True on success (see `proc_result`).
    """
    logger.info("EXECUTE: cd {}; {}".format(cwd, " ".join(args)))
    out, err, returncode = subprocess.execute(*args, cwd=cwd, stderr=True)
    # Subprocess output may be bytes; normalize both streams to str
    streams = []
    for stream in (out, err):
        if isinstance(stream, bytes):
            stream = stream.decode()
        streams.append(stream)
    out, err = streams
    return proc_result(args, out, err, returncode)
def pymcacfg_add_mcinfo(
    configdict,
    outpath,
    p_polarisation=1,
    ninteractions=1,
    multiplicity=100000,
    source_distance=100,
    has_atmosphere=False,
    beamsize=1e-4,
):
    """Add an XRF Monte-Carlo ("xrfmc") section to a PyMca configuration.

    Mutates `configdict` in place: fills configdict["xrfmc"]["setup"],
    guarantees the attenuator entries xmimsim-pymca expects, and pads the
    multilayer definition to the fixed layer count it requires.

    See PyMca5.PyMcaGui.physics.xrf.XRFMCPyMca.XRFMCParameters for the
    meaning of the setup keys.
    """
    # Slit geometry chosen so the slit divergence matches the beam size:
    # arctan2(beamsize/2, source_distance) == arctan2(slit_width/2, slit_distance)
    slit_distance = source_distance / 2.0
    slit_width = beamsize / 2.0
    configdict["xrfmc"] = {
        "setup": {
            "p_polarisation": p_polarisation,
            # Point source: zero size and zero divergence
            "source_diverg_x": 0,
            "source_diverg_y": 0,
            "source_size_x": 0,
            "source_size_y": 0,
            "source_sample_distance": source_distance,
            "slit_distance": slit_distance,
            "slit_width_x": slit_width,
            "slit_width_y": slit_width,
            "nmax_interaction": ninteractions,
            # Index of the first non-atmospheric layer
            "layer": 2 if has_atmosphere else 1,
            "output_dir": outpath,
            "histories": multiplicity,
        }
    }
    # xmimsim-pymca requires these attenuator entries to exist
    attenuators = configdict["attenuators"]
    for name in ("BeamFilter0", "BeamFilter1", "Absorber"):
        attenuators.setdefault(name, [0, "-", 0.0, 0.0, 1.0])
    # TODO: xmimsim-pymca expects a fixed number of layers
    multilayer = configdict.get("multilayer")
    if multilayer:
        for i in range(len(multilayer), 10):
            multilayer["Layer{}".format(i)] = [0, "None", 0, 0]
def pymcacfg_to_xmimsimcfg(pymcacfg, xmimsimcfg, **kwargs):
    """Read a PyMca config file, add the Monte-Carlo section and write it
    back out as an xmimsim-pymca config file.

    **kwargs are forwarded to `pymcacfg_add_mcinfo`.
    """
    cfg = ConfigDict.ConfigDict(filelist=pymcacfg)
    pymcacfg_add_mcinfo(cfg, os.path.dirname(xmimsimcfg), **kwargs)
    cfg.write(xmimsimcfg)
def run_xmimsim_pymca(xmimsimcfg, xmso, pileup=True, escape=True):
    """Run xmimsim-pymca, writing .spe spectra next to the xmso file.

    Returns True when the simulation succeeded.
    """
    workdir = os.path.dirname(xmso)
    xmsofile = os.path.basename(xmso)
    basename = os.path.splitext(xmsofile)[0]
    args = [
        "xmimsim-pymca",
        "--spe-file-unconvoluted={}_lines".format(basename),
        "--spe-file={}_convoluted".format(basename),
        "--verbose",
        "--enable-single-run",
        "--enable-pile-up" if pileup else "--disable-pile-up",
        "--enable-escape-peaks" if escape else "--disable-escape-peaks",
        xmimsimcfg,
        xmsofile,
    ]
    return execute(args, workdir)
def xmso_to_xmsi(xmso, xmsi):
    """Convert an xmso result file to an xmsi input file and patch it.

    Returns True on success, False when the conversion tool failed.
    """
    workdir = os.path.dirname(xmsi)
    if not execute(["xmso2xmsi", xmso, xmsi], workdir):
        return False
    patch_xmsi(xmso, xmsi)
    return True
def patch_xmsi(xmso, xmsi):
    """Post-process an xmsi file produced by xmso2xmsi, in place.

    Three best-effort fixes (each silently skipped when the matching line
    is absent):
      * point the empty <outputfile/> element back at the originating
        xmso file,
      * replace a zero detector pulse width with a tiny non-zero value,
      * likewise rewrite a 1e-5 pulse width to 1e-12.

    NOTE(review): the first two matches are on exact line text including
    leading spaces, so this is sensitive to the indentation xmso2xmsi
    emits — confirm against current tool output.
    """
    with open(xmsi) as f:
        content = f.readlines()
    # Work on newline-stripped lines; the file is re-joined at the end.
    content = [x.rstrip() for x in content]
    try:
        i = content.index(r"  <outputfile/>")
        content[i] = r"  <outputfile>{}</outputfile>".format(xmso)
    except ValueError:
        pass
    try:
        i = content.index(r"  <pulse_width>0</pulse_width>")
        content[i] = r"  <pulse_width>1e-12</pulse_width>"
    except ValueError:
        pass
    try:
        # First line containing a 1e-5 pulse width, if any.
        i = [r"1e-5</pulse_width>" in line for line in content].index(True)
        content[i] = content[i].replace(r"1e-5</pulse_width>", r"1e-12</pulse_width>")
    except ValueError:
        pass
    with open(xmsi, mode="w") as f:
        f.write("\n".join(content))
def xmsi_to_xrmc(xmsi, xrmcpath, outpath, basename, pileup=True):
    """Convert an xmsi input file into an XRMC input deck.

    Runs xmsi2xrmc with output files rooted at `outpath[basename]`, then
    works around detector-response defaults (xrmc issue #49) by forcing
    near-zero FanoFactor and Noise in detector.dat.

    Returns True on success.
    """
    cmd = ["xmsi2xrmc", xmsi]
    cmd.append("--enable-pile-up" if pileup else "--disable-pile-up")
    root = str(outpath[basename])
    cmd.append("--convoluted-file={}_convoluted.dat".format(root))
    cmd.append("--unconvoluted-file={}_lines.dat".format(root))
    if not execute(cmd, str(xrmcpath)):
        return False
    # TODO: xrmc issue #49
    detector_file = str(xrmcpath["detector.dat"])
    with open(detector_file, mode="r") as f:
        original = f.readlines()
    patched = []
    for line in original:
        if line.startswith("FanoFactor"):
            line = "FanoFactor 1e-10\n"
        elif line.startswith("Noise"):
            line = "Noise 1e-10\n"
        patched.append(line)
    with open(detector_file, mode="w") as f:
        f.writelines(patched)
    return True
def run(
    outpath,
    pymcahandle=None,
    pymcacfg=None,
    pileup=True,
    escape=True,
    outradix="out",
    runxrmc=False,
    **kwargs
):
    """Run the full xmimsim-pymca simulation chain inside `outpath`.

    Pipeline: write/copy the PyMca config, derive an xmimsim config from
    it, run xmimsim-pymca, convert the xmso result to an xmsi input and,
    optionally, re-run the simulation with XRMC for comparison.

    Args:
        outpath(str): output directory (created when missing)
        pymcahandle(PyMcaHandle): overwrites all other arguments
        pymcacfg(str): existing PyMca config file (ignored when
            `pymcahandle` is given)
        pileup(bool): enable pile-up peaks
        escape(bool): enable escape peaks
        outradix(str): basename used for all generated files
        runxrmc(bool): for comparison
        **kwargs: see `pymcacfg_add_mcinfo`
    Returns:
        success(bool)
    """
    outpath = localfs.Path(outpath).mkdir()
    if pymcahandle is not None:
        # Derive config file, pileup/escape flags and the interaction
        # count from the handle.
        pymcacfg = str(outpath["{}.cfg".format(outradix)])
        pymcahandle.savepymca(pymcacfg)
        pileup = pymcahandle.pileup
        escape = pymcahandle.escape
        kwargs["ninteractions"] = pymcahandle.ninteractions
    else:
        # Work on a copy of the user-supplied config inside outpath.
        pymcacfg = str(localfs.Path(pymcacfg).copy(outpath[pymcacfg]))
    xmimsimcfg = str(outpath["{}_xmimsim.cfg".format(outradix)])
    pymcacfg_to_xmimsimcfg(pymcacfg, xmimsimcfg, **kwargs)
    xmso = str(outpath["{}.xmso".format(outradix)])
    if not run_xmimsim_pymca(xmimsimcfg, xmso, escape=escape, pileup=pileup):
        return False
    xmsi = str(outpath["{}.xmsi".format(outradix)])
    if not xmso_to_xmsi(xmso, xmsi):
        return False
    if runxrmc:
        # Optional XRMC cross-check in its own subdirectory.
        xrmcpath = outpath["xrmc"].mkdir()
        xrmcoutpath = xrmcpath["output"].mkdir()
        if not xmsi_to_xrmc(
            xmsi,
            xrmcpath,
            xrmcoutpath,
            outradix,
            pileup=pileup,
        ):
            return False
        if not execute(["xrmc", "input.dat"], str(xrmcpath)):
            return False
    return True
def loadxmimsimresult(outpath, outradix="out", convoluted=False):
    """Load the highest-numbered .spe spectrum written by xmimsim-pymca.

    Args:
        outpath(str): directory containing the .spe files
        outradix(str): basename used when the simulation was run
        convoluted(bool): load the detector-convoluted spectrum instead
            of the raw line spectrum

    Returns:
        tuple: (mca, info) where info holds the excitation energy and the
        zero/gain calibration coefficients.
    """
    outpath = localfs.Path(outpath)
    if convoluted:
        suffix = "convoluted"
    else:
        suffix = "lines"
    fmt = "{}_{}_{{}}.spe".format(outradix, suffix)
    # Walk up the numbered files to the last one that exists.
    # NOTE(review): assumes localfs' `exists` is a property (not a
    # method), and that file 0 exists — otherwise spe.read fails below.
    i = 0
    while outpath[fmt.format(i + 1)].exists:
        i += 1
    filename = str(outpath[fmt.format(i)])
    mca, channels, energy, coeff = spe.read(filename)
    zero, gain = coeff
    info = {"xenergy": energy, "zero": zero, "gain": gain}
    return mca, info
| |
import sqlite3
import os
try:
import json
except ImportError:
import simplejson as json
import sys
import xml.sax
import binascii
from vincenty import vincenty
from struct import pack, unpack
from rtree import Rtree
def cons(ary):
    """Yield consecutive (ary[i], ary[i+1]) pairs of a sequence."""
    for left, right in zip(ary[:-1], ary[1:]):
        yield (left, right)
def pack_coords(coords):
    """Pack a list of (lon, lat) float pairs into base64 text.

    Each pair is stored as two native-order 32-bit floats (8 bytes per
    coordinate).  Joining bytes (b"") instead of str keeps this working
    on both Python 2 and Python 3: struct.pack returns bytes, so the
    original "".join crashed under Python 3.
    """
    return binascii.b2a_base64( b"".join([pack( "ff", *coord ) for coord in coords]) )
def unpack_coords(str):
    """Decode base64 text produced by pack_coords back into a list of
    (lon, lat) float-pair tuples (two 32-bit floats per 8-byte chunk)."""
    raw = binascii.a2b_base64( str )
    return [unpack( "ff", raw[i:i+8] ) for i in range(0, len(raw), 8)]
class Node:
    """An OSM node: an id, a (lon, lat) position and a tag dictionary."""
    def __init__(self, id, lon, lat):
        # id is kept as given (a string in this codebase); lon/lat floats
        self.id = id
        self.lon = lon
        self.lat = lat
        self.tags = {}
    def __repr__(self):
        return "<Node id='%s' (%s, %s) n_tags=%d>"%(self.id, self.lon, self.lat, len(self.tags))
class Way:
    """An OSM way: an id, an ordered list of node-id refs, and tags."""
    def __init__(self, id):
        self.id = id
        # ordered node ids making up the way's geometry
        self.nd_ids = []
        self.tags = {}
    def __repr__(self):
        return "<Way id='%s' n_nds=%d n_tags=%d>"%(self.id, len(self.nd_ids), len(self.tags))
class WayRecord:
    """Lazily-parsed way row.

    `tags` and `nds` may arrive either as JSON text (straight from the
    database) or as already-parsed objects; JSON parsing is deferred to
    the first property access and then cached.
    """
    def __init__(self, id, tags, nds):
        self.id = id
        # type(u"") is `unicode` on Python 2 and `str` on Python 3, so
        # this "is it still JSON text?" check works on both.  The
        # original compared type() against the py2-only name `unicode`,
        # which raises NameError under Python 3.
        if isinstance(tags, type(u"")):
            self.tags_str = tags
            self.tags_cache = None
        else:
            self.tags_cache = tags
            self.tags_str = None
        if isinstance(nds, type(u"")):
            self.nds_str = nds
            self.nds_cache = None
        else:
            self.nds_cache = nds
            self.nds_str = None
    @property
    def tags(self):
        # Parse and memoize on first access.
        self.tags_cache = self.tags_cache or json.loads(self.tags_str)
        return self.tags_cache
    @property
    def nds(self):
        # Parse and memoize on first access.
        self.nds_cache = self.nds_cache or json.loads(self.nds_str)
        return self.nds_cache
    def __repr__(self):
        return "<WayRecord id='%s'>"%self.id
class OSMDB:
    """SQLite-backed store of OSM nodes, ways and derived street edges.

    Optionally maintains an Rtree spatial index over shared way end-nodes
    for nearest-node lookups.

    NOTE(review): this class is Python 2 code throughout (print
    statements, cursor.next()).
    """
    def __init__(self, dbname,overwrite=False,rtree_index=True):
        if overwrite:
            try:
                os.remove( dbname )
            except OSError:
                pass
        self.conn = sqlite3.connect(dbname)
        if rtree_index:
            # Rtree persists its index in files alongside the database
            self.index = Rtree( dbname )
        else:
            self.index = None
        if overwrite:
            self.setup()
    def setup(self):
        """Create the nodes and ways tables (fresh databases only)."""
        c = self.conn.cursor()
        c.execute( "CREATE TABLE nodes (id TEXT, tags TEXT, lat FLOAT, lon FLOAT, endnode_refs INTEGER DEFAULT 1)" )
        c.execute( "CREATE TABLE ways (id TEXT, tags TEXT, nds TEXT)" )
        self.conn.commit()
        c.close()
    def create_indexes(self):
        """Create lookup indexes; called once after the bulk import."""
        c = self.conn.cursor()
        c.execute( "CREATE INDEX nodes_id ON nodes (id)" )
        c.execute( "CREATE INDEX nodes_lon ON nodes (lon)" )
        c.execute( "CREATE INDEX nodes_lat ON nodes (lat)" )
        c.execute( "CREATE INDEX ways_id ON ways (id)" )
        self.conn.commit()
        c.close()
    def populate(self, osm_filename, accept=lambda tags: True, reporter=None):
        """Stream-parse an OSM XML file and insert its nodes and ways.

        NOTE(review): the `accept` and `reporter` parameters are not used
        by the handler below — confirm whether filtering was intended.
        """
        print "importing osm from XML to sqlite database"
        c = self.conn.cursor()
        self.n_nodes = 0
        self.n_ways = 0
        superself = self
        # NOTE(review): all handler methods are classmethods and the
        # class itself (not an instance) is handed to xml.sax.parse, so
        # currElem lives on the handler class object.
        class OSMHandler(xml.sax.ContentHandler):
            @classmethod
            def setDocumentLocator(self,loc):
                pass
            @classmethod
            def startDocument(self):
                pass
            @classmethod
            def endDocument(self):
                pass
            @classmethod
            def startElement(self, name, attrs):
                if name=='node':
                    self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))
                elif name=='way':
                    self.currElem = Way(attrs['id'])
                elif name=='tag':
                    # tag elements attach to the current node or way
                    self.currElem.tags[attrs['k']] = attrs['v']
                elif name=='nd':
                    self.currElem.nd_ids.append( attrs['ref'] )
            @classmethod
            def endElement(self,name):
                if name=='node':
                    if superself.n_nodes%5000==0:
                        print "node %d"%superself.n_nodes
                    superself.n_nodes += 1
                    superself.add_node( self.currElem, c )
                elif name=='way':
                    if superself.n_ways%5000==0:
                        print "way %d"%superself.n_ways
                    superself.n_ways += 1
                    superself.add_way( self.currElem, c )
            @classmethod
            def characters(self, chars):
                pass
        xml.sax.parse(osm_filename, OSMHandler)
        self.conn.commit()
        c.close()
        print "indexing primary tables...",
        self.create_indexes()
        print "done"
    def set_endnode_ref_counts( self ):
        """Populate nodes.endnode_refs. Necessary for splitting ways into single-edge sub-ways"""
        print "counting end-node references to find way split-points"
        c = self.conn.cursor()
        endnode_ref_counts = {}
        c.execute( "SELECT nds from ways" )
        print "...counting"
        # First pass: count how many ways reference each node
        for i, (nds_str,) in enumerate(c):
            if i%5000==0:
                print i
            nds = json.loads( nds_str )
            for nd in nds:
                endnode_ref_counts[ nd ] = endnode_ref_counts.get( nd, 0 )+1
        print "...updating nodes table"
        # Second pass: persist counts > 1 (default column value is 1)
        for i, (node_id, ref_count) in enumerate(endnode_ref_counts.items()):
            if i%5000==0:
                print i
            if ref_count > 1:
                c.execute( "UPDATE nodes SET endnode_refs = ? WHERE id=?", (ref_count, node_id) )
        self.conn.commit()
        c.close()
    def index_endnodes( self ):
        """Add every shared node (endnode_refs > 1) to the Rtree index."""
        print "indexing endpoint nodes into rtree"
        c = self.conn.cursor()
        #TODO index endnodes if they're at the end of oneways - which only have one way ref, but are still endnodes
        c.execute( "SELECT id, lat, lon FROM nodes WHERE endnode_refs > 1" )
        for id, lat, lon in c:
            self.index.add( int(id), (lon, lat, lon, lat) )
        c.close()
    def create_and_populate_edges_table( self, tolerant=False ):
        """Split each way at shared nodes and store one edge per sub-way.

        When `tolerant` is true, ways with missing/empty node lists
        (IndexError) are skipped instead of aborting the import.
        """
        self.set_endnode_ref_counts()
        self.index_endnodes()
        print "splitting ways and inserting into edge table"
        c = self.conn.cursor()
        c.execute( "CREATE TABLE edges (id TEXT, parent_id TEXT, start_nd TEXT, end_nd TEXT, dist FLOAT, geom TEXT)" )
        for i, way in enumerate(self.ways()):
            try:
                if i%5000==0:
                    print i
                subways = []
                curr_subway = [ way.nds[0] ] # add first node to the current subway
                for nd in way.nds[1:-1]: # for every internal node of the way
                    curr_subway.append( nd )
                    if self.node(nd)[4] > 1: # node reference count is greater than one, node is shared by two ways
                        subways.append( curr_subway )
                        curr_subway = [ nd ]
                curr_subway.append( way.nds[-1] ) # add the last node to the current subway, and store the subway
                subways.append( curr_subway );
                #insert into edge table
                for i, subway in enumerate(subways):
                    # node row layout: (id, tags, lat, lon, endnode_refs)
                    coords = [(lambda x:(x[3],x[2]))(self.node(nd)) for nd in subway]
                    packt = pack_coords( coords )
                    dist = sum([vincenty(lat1, lng1, lat2, lng2) for (lng1, lat1), (lng2, lat2) in cons(coords)])
                    c.execute( "INSERT INTO edges VALUES (?, ?, ?, ?, ?, ?)", ("%s-%s"%(way.id, i),
                                                                               way.id,
                                                                               subway[0],
                                                                               subway[-1],
                                                                               dist,
                                                                               packt) )
            except IndexError:
                if tolerant:
                    continue
                else:
                    raise
        print "indexing edges...",
        c.execute( "CREATE INDEX edges_id ON edges (id)" )
        c.execute( "CREATE INDEX edges_parent_id ON edges (parent_id)" )
        print "done"
        self.conn.commit()
        c.close()
    def edge(self, id):
        """Return one edge (with parsed geometry and way tags) by id.

        Raises IndexError when no such edge exists.
        """
        c = self.conn.cursor()
        c.execute( "SELECT edges.*, ways.tags FROM edges, ways WHERE ways.id = edges.parent_id AND edges.id = ?", (id,) )
        try:
            ret = c.next()
            way_id, parent_id, from_nd, to_nd, dist, geom, tags = ret
            return (way_id, parent_id, from_nd, to_nd, dist, unpack_coords( geom ), json.loads(tags))
        except StopIteration:
            c.close()
            raise IndexError( "Database does not have an edge with id '%s'"%id )
        # NOTE(review): the two lines below are unreachable — both
        # branches above return or raise before reaching them.
        c.close()
        return ret
    def edges(self):
        """Yield every edge joined with its parent way's tags."""
        c = self.conn.cursor()
        c.execute( "SELECT edges.*, ways.tags FROM edges, ways WHERE ways.id = edges.parent_id" )
        for way_id, parent_id, from_nd, to_nd, dist, geom, tags in c:
            yield (way_id, parent_id, from_nd, to_nd, dist, unpack_coords(geom), json.loads(tags))
        c.close()
    def add_way( self, way, curs=None ):
        """Insert a Way; commits only when it created its own cursor."""
        if curs is None:
            curs = self.conn.cursor()
            close_cursor = True
        else:
            close_cursor = False
        curs.execute("INSERT INTO ways (id, tags, nds) VALUES (?, ?, ?)", (way.id, json.dumps(way.tags), json.dumps(way.nd_ids) ))
        if close_cursor:
            self.conn.commit()
            curs.close()
    def add_node( self, node, curs=None ):
        """Insert a Node; commits only when it created its own cursor."""
        if curs is None:
            curs = self.conn.cursor()
            close_cursor = True
        else:
            close_cursor = False
        curs.execute("INSERT INTO nodes (id, tags, lat, lon) VALUES (?, ?, ?, ?)", ( node.id, json.dumps(node.tags), node.lat, node.lon ) )
        if close_cursor:
            self.conn.commit()
            curs.close()
    def nodes(self):
        """Yield every node row as stored in the database."""
        c = self.conn.cursor()
        c.execute( "SELECT * FROM nodes" )
        for node_row in c:
            yield node_row
        c.close()
    def node(self, id):
        """Return the raw row for node `id`; raises IndexError if absent."""
        c = self.conn.cursor()
        c.execute( "SELECT * FROM nodes WHERE id = ?", (id,) )
        try:
            ret = c.next()
        except StopIteration:
            c.close()
            raise IndexError( "Database does not have node with id '%s'"%id )
        c.close()
        return ret
    def nearest_node(self, lat, lon, range=0.005):
        """Return (id, lat, lon, dist) of the nearest shared node.

        Uses the Rtree index when available, otherwise a bounding-box
        scan of `range` degrees.  Distance is planar (degrees), not
        geodesic.  Returns (None, None, None, None) when nothing is found.
        """
        c = self.conn.cursor()
        if self.index:
            print "YOU'RE USING THE INDEX"
            id = list(self.index.nearest( (lon, lat), 1 ))[0]
            print "THE ID IS %d"%id
            c.execute( "SELECT id, lat, lon FROM nodes WHERE id = ?", (id,) )
        else:
            c.execute( "SELECT id, lat, lon FROM nodes WHERE endnode_refs > 1 AND lat > ? AND lat < ? AND lon > ? AND lon < ?", (lat-range, lat+range, lon-range, lon+range) )
        dists = [(nid, nlat, nlon, ((nlat-lat)**2+(nlon-lon)**2)**0.5) for nid, nlat, nlon in c]
        if len(dists)==0:
            return (None, None, None, None)
        return min( dists, key = lambda x:x[3] )
    def nearest_of( self, lat, lon, nodes ):
        """Return (id, lat, lon, dist) of the closest of the given node ids.

        NOTE(review): node ids are interpolated into the SQL string; safe
        only for trusted, numeric-like id lists.
        """
        c = self.conn.cursor()
        c.execute( "SELECT id, lat, lon FROM nodes WHERE id IN (%s)"%",".join([str(x) for x in nodes]) )
        dists = [(nid, nlat, nlon, ((nlat-lat)**2+(nlon-lon)**2)**0.5) for nid, nlat, nlon in c]
        if len(dists)==0:
            return (None, None, None, None)
        return min( dists, key = lambda x:x[3] )
    def way(self, id):
        """Return a lazily-parsed WayRecord; raises when `id` is unknown."""
        c = self.conn.cursor()
        c.execute( "SELECT id, tags, nds FROM ways WHERE id = ?", (id,) )
        try:
            id, tags_str, nds_str = c.next()
            ret = WayRecord(id, tags_str, nds_str)
        except StopIteration:
            raise Exception( "OSMDB has no way with id '%s'"%id )
        finally:
            c.close()
        return ret
    def way_nds(self, id):
        """Return the parsed node-id list of way `id`."""
        c = self.conn.cursor()
        c.execute( "SELECT nds FROM ways WHERE id = ?", (id,) )
        (nds_str,) = c.next()
        c.close()
        return json.loads( nds_str )
    def ways(self):
        """Yield every way as a lazily-parsed WayRecord."""
        c = self.conn.cursor()
        c.execute( "SELECT id, tags, nds FROM ways" )
        for id, tags_str, nds_str in c:
            yield WayRecord( id, tags_str, nds_str )
        c.close()
    def count_ways(self):
        """Return the number of rows in the ways table."""
        c = self.conn.cursor()
        c.execute( "SELECT count(*) FROM ways" )
        ret = c.next()[0]
        c.close()
        return ret
    def count_edges(self):
        """Return the number of rows in the edges table."""
        c = self.conn.cursor()
        c.execute( "SELECT count(*) FROM edges" )
        ret = c.next()[0]
        c.close()
        return ret
    def delete_way(self, id):
        """Delete way `id`.  NOTE(review): does not commit — the change
        is only persisted by a later commit on this connection."""
        c = self.conn.cursor()
        c.execute("DELETE FROM ways WHERE id = ?", (id,))
        c.close()
    def bounds(self):
        """Return the (min_lon, min_lat, max_lon, max_lat) node extent."""
        c = self.conn.cursor()
        c.execute( "SELECT min(lon), min(lat), max(lon), max(lat) FROM nodes" )
        ret = c.next()
        c.close()
        return ret
    def execute(self,sql,args=None):
        """Yield rows of an arbitrary SQL statement (optionally bound)."""
        c = self.conn.cursor()
        if args:
            for row in c.execute(sql,args):
                yield row
        else:
            for row in c.execute(sql):
                yield row
        c.close()
    def cursor(self):
        """Return a raw cursor on the underlying connection."""
        return self.conn.cursor()
def test_wayrecord():
    """Smoke-test WayRecord with pre-parsed and JSON-string inputs."""
    expected_tags = {'highway':'bumpkis'}
    expected_nds = ['1','2','3']
    # Construct from already-parsed objects...
    wr = WayRecord( "1", {'highway':'bumpkis'}, ['1','2','3'] )
    assert wr.id == "1"
    assert wr.tags == expected_tags
    assert wr.nds == expected_nds
    # ...and from raw JSON strings, as read back from the database.
    wr = WayRecord( "1", "{\"highway\":\"bumpkis\"}", "[\"1\",\"2\",\"3\"]" )
    assert wr.id == "1"
    assert wr.tags == expected_tags
    assert wr.nds == expected_nds
def osm_to_osmdb(osm_filename, osmdb_filename, tolerant=False):
    """Import an OSM XML file into a fresh OSMDB and build its edge table."""
    db = OSMDB( osmdb_filename, overwrite=True )
    db.populate( osm_filename, accept=lambda tags: 'highway' in tags, reporter=sys.stdout )
    db.create_and_populate_edges_table(tolerant)
def main():
    """Command-line entry point: osmdb.py <osm_file> <osmdb_file> [tolerant]."""
    from sys import argv
    usage = "python osmdb.py osm_filename osmdb_filename"
    if len(argv) < 3:
        print usage
        exit()
    osm_filename = argv[1]
    osmdb_filename = argv[2]
    # Any extra 'tolerant' argument makes the edge build skip broken ways.
    tolerant = 'tolerant' in argv
    osm_to_osmdb(osm_filename, osmdb_filename, tolerant)
if __name__=='__main__':
    main()
| |
import os
import re
import subprocess
import sys
from dcr.scenario_utils.distro import get_distro
# Root of the cgroup v1 hierarchy and the agent's service/cgroup names.
BASE_CGROUP = '/sys/fs/cgroup'
AGENT_CGROUP_NAME = 'WALinuxAgent'
AGENT_SERVICE_NAME = "walinuxagent.service"
CONTROLLERS = ['cpu'] # Only verify the CPU controller since memory accounting is not enabled yet.
# Patterns used to find the daemon/agent processes by command line.
DAEMON_CMDLINE_PATTERN = re.compile(r".*python.*waagent.*-daemon")
AGENT_CMDLINE_PATTERN = re.compile(r".*python.*-run-exthandlers")
# NOTE(review): unlike its siblings this one is a raw string, not a
# compiled pattern — confirm whether its consumers expect a string.
CREATED_CGROUP_PATTERN = r"..*Created cgroup (/sys/fs/cgroup/.+)"
EXTENSION_PID_ADDED_PATTERN = re.compile(r".*Added PID (\d+) to cgroup[s]* (/sys/fs/cgroup/.+)")
CGROUP_TRACKED_PATTERN = re.compile(r'Started tracking cgroup ([^\s]+)\s+\[(?P<path>[^\s]+)\]')
#
# It is OK for these processes to show up in the Agent's cgroup
#
WHITELISTED_AGENT_REGEXES = [
    #
    # The monitor thread uses these periodically:
    #
    re.compile(r"/sbin/dhclient\s.+/run/dhclient.*/var/lib/dhcp/dhclient.*/var/lib/dhcp/dhclient.*"),
    re.compile(r".*iptables --version.*"),
    re.compile(r".*iptables (-w)? -t security.*"),
    #
    # The agent starts extensions using systemd-run; the actual extension command will be on a different process.
    #
    re.compile(r".*systemd-run --unit=Microsoft.Azure.Diagnostics.LinuxDiagnostic_3.* "
               r"--scope --slice=azure-vmextensions.slice /var/lib/waagent/Microsoft.Azure.Diagnostics.LinuxDiagnostic-3.*/diagnostic.py "
               r"-enable.*"),
    #
    # The agent can start a new shell process.
    #
    re.compile(r"^\[sh\]$")
]
def exit_if_cgroups_not_supported():
    """Terminate with exit code 1 unless the cgroup filesystem root exists."""
    print("===== Checking if distro supports cgroups =====")
    distro = get_distro()
    if not os.path.exists(BASE_CGROUP):
        print("\tDistro {0} does not support cgroups -- exiting".format(distro))
        sys.exit(1)
    print('\tDistro {0} supports cgroups\n'.format(distro))
def run_get_output(cmd, print_std_out=False):
    """Run `cmd` through the shell, capturing stderr into stdout.

    Returns the decoded output split into lines (no trailing newlines on
    the individual elements).
    """
    raw = subprocess.check_output(cmd,
                                  stderr=subprocess.STDOUT,
                                  shell=True)
    text = str(raw,
               encoding='utf-8',
               errors="backslashreplace")
    if print_std_out:
        print(text)
    return text.split("\n")
def is_systemd_distro():
    """Return True when PID 1 is systemd (best effort; False on any error)."""
    try:
        init_name = run_get_output('cat /proc/1/comm')[0].strip()
        return init_name == 'systemd'
    except Exception:
        return False
def print_cgroups():
    """Print every currently mounted cgroup filesystem."""
    print("====== Currently mounted cgroups ======")
    for mount_line in run_get_output('mount'):
        if 'type cgroup' in mount_line:
            print('\t{0}'.format(mount_line))
    print("")
def print_processes():
    """Print the current process tree (ps aux --forest), indented."""
    print("====== Currently running processes ======")
    for ps_line in run_get_output("ps aux --forest"):
        print("\t{0}".format(ps_line))
    print("")
def print_service_status(service_status):
    """Print a systemctl status listing, one indented line at a time.

    Non-ascii characters are replaced because DCR logs everything written
    to stdout and fails on non-ascii output (such as the box-drawing
    characters in `systemctl status {service_name}`).
    """
    for status_line in service_status:
        sanitized = status_line.encode("ascii", "replace").decode()
        print("\t" + sanitized.replace("\n", ""))
    print("")
def get_parent_pid(pid):
    """Return the parent PID (as a string) read from /proc/<pid>/stat,
    or None when the stat file cannot be read or parsed."""
    try:
        with open("/proc/{0}/stat".format(pid), "r") as stat_file:
            fields = stat_file.readline().split(" ")
        # field 4 of /proc/<pid>/stat is the ppid
        return fields[3]
    except Exception:
        return None
def get_pid_by_cmdline(pattern):
    """Scan /proc for the first process whose cmdline matches `pattern`.

    Returns the PID as a string, or -1 when no process matches.
    """
    for entry in os.listdir('/proc'):
        if entry == 'curproc':
            continue
        try:
            with open('/proc/{0}/cmdline'.format(entry), mode='r') as fd:
                cmdline = fd.read()
            if re.match(pattern, cmdline):
                return entry
        except Exception:
            # non-PID entries and processes that vanished mid-scan
            pass
    return -1
def get_cmdline_by_pid(pid):
    """Return the raw /proc/<pid>/cmdline contents, or None when the
    process cannot be read (e.g. it already exited)."""
    try:
        with open('/proc/{0}/cmdline'.format(pid), mode='r') as cmdline_file:
            return cmdline_file.read()
    except Exception:
        return None
def get_process_cgroups(pid):
    """Return the lines of /proc/<pid>/cgroup (one per hierarchy),
    dropping the trailing empty entry left by the final newline."""
    with open('/proc/{0}/cgroup'.format(pid), mode='r') as cgroup_file:
        lines = cgroup_file.read().split('\n')
    return lines[:-1]
def get_agent_cgroup_mount_path():
    """Return the agent's expected cgroup path relative to the hierarchy
    root; systemd distros place the service under azure.slice."""
    # TODO: change the service name based on distro (SUSE is waagent, for example)
    if is_systemd_distro():
        return os.path.join('/', 'azure.slice', AGENT_SERVICE_NAME)
    return os.path.join('/', AGENT_SERVICE_NAME)
def check_cgroup_for_agent_process(name, pid):
    """Verify process `pid` is mounted at the expected agent cgroup path
    for every verified controller; raises on any mismatch.

    `name` is only used in log/error messages ("daemon" or "agent").
    """
    process_cgroups = get_process_cgroups(pid)
    expected_cgroup_path = get_agent_cgroup_mount_path()
    print('\tretrieved cgroups for {0}:'.format(name))
    for cgroup in process_cgroups:
        print("\t\t{0}".format(cgroup))
    print("")
    # Each /proc/PID/cgroup line is hierarchy-id:controller-list:mount-path,
    # e.g.:
    #   4:memory:/system.slice/walinuxagent.service
    #   7:memory:/WALinuxAgent/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux
    for controller in CONTROLLERS:
        for cgroup in process_cgroups:
            fields = cgroup.split(':')
            controller_names = fields[1].split(',')
            mount_path = fields[2]
            if controller in controller_names:
                if mount_path != expected_cgroup_path:
                    raise Exception("Expected {0} cgroup to be mounted under {1}, "
                                    "but it's mounted under {2}".format(name, expected_cgroup_path, mount_path))
    print("\t{0}'s PID is {1}, cgroup mount path is {2}".format(name, pid, expected_cgroup_path))
    print("\tverified {0}'s /proc/cgroup is expected!\n".format(name))
def check_pids_in_agent_cgroup(agent_cgroup_procs, daemon_pid, agent_pid):
    """Verify cgroup.procs lists the daemon and agent PIDs and nothing
    unexpected.

    Any other PID must be a whitelisted command whose parent is the
    agent, the daemon, or no longer readable (process already exited);
    otherwise an Exception is raised.

    Args:
        agent_cgroup_procs: path to the cgroup.procs file to inspect
        daemon_pid: expected daemon PID (string)
        agent_pid: expected agent PID (string)
    Returns:
        True when every PID was accounted for.
    Raises:
        Exception: an expected PID is missing or an unexpected,
            non-whitelisted PID is present.
    """
    with open(agent_cgroup_procs, "r") as agent_fd:
        content = agent_fd.read()
        print("\tcontent of {0}:\n{1}".format(agent_cgroup_procs, content))
    # trailing newline produces an empty last element; drop it
    pids = content.split('\n')[:-1]
    if daemon_pid not in pids:
        raise Exception("Daemon PID {0} not found in expected cgroup {1}!".format(daemon_pid, agent_cgroup_procs))
    if agent_pid not in pids:
        raise Exception("Agent PID {0} not found in expected cgroup {1}!".format(agent_pid, agent_cgroup_procs))
    for pid in pids:
        if pid == daemon_pid or pid == agent_pid:
            continue
        else:
            # There is an unexpected PID in the cgroup, check what process it is
            cmd = get_cmdline_by_pid(pid)
            ppid = get_parent_pid(pid)
            whitelisted = is_whitelisted(cmd)
            # If the process is whitelisted and a child of the agent, allow it. The process could have terminated
            # in the meantime, but we allow it if it's whitelisted.
            if whitelisted and (ppid is None or ppid == agent_pid or ppid == daemon_pid):
                print("\tFound whitelisted process in agent cgroup:\n\t{0} {1}\n"
                      "\tparent process {2}".format(pid, cmd, ppid))
                continue
            raise Exception("Found unexpected process in the agent cgroup:\n\t{0} {1}\n"
                            "\tparent process {2}".format(pid, cmd, ppid))
    return True
def is_whitelisted(cmd):
    """Return True when `cmd` matches a whitelisted agent command-line.

    `cmd` may be None: check_pids_in_agent_cgroup feeds it the result of
    get_cmdline_by_pid, which returns None for processes that exited.
    The original passed None straight into re.match, raising TypeError;
    treat it as "not whitelisted" instead.
    """
    if cmd is None:
        return False
    return any(re.match(r, cmd) is not None for r in WHITELISTED_AGENT_REGEXES)
def parse_processes_from_systemctl_status(service_status):
    """Extract the process list from `systemctl status` output lines.

    The process block is the run of lines after the "CGroup:" header,
    terminated by the first empty line that follows it.

    Fix over the original: indices are taken with enumerate() instead of
    list.index(line), which returned the position of the *first* equal
    line and produced wrong slices when the output contained duplicate
    lines or an empty line before the CGroup header.

    Returns:
        list of (PID, cmdline) string tuples.
    """
    start_pattern = re.compile(r".*CGroup:\s+.*")
    end_pattern = re.compile(r"^$")
    start_index = -1
    end_index = -1
    for i, line in enumerate(service_status):
        if start_index < 0:
            if re.match(start_pattern, line):
                start_index = i
        elif re.match(end_pattern, line):
            # first empty line AFTER the header terminates the block
            end_index = i
            break
    processes_raw = service_status[start_index + 1:end_index]
    # Remove non-ascii characters (systemctl tree glyphs) and extra whitespace
    cleaned = [''.join(ch for ch in raw if ord(ch) < 128).strip() for raw in processes_raw]
    # Split each "PID cmdline..." entry into a (PID, cmdline) tuple
    return [(entry.split(" ")[0], ' '.join(entry.split(" ")[1:])) for entry in cleaned]
def verify_agent_cgroup_assigned_correctly_systemd(service_status):
    """Verify from `systemctl status` output that the agent service is
    active; raises when no "Active: active" line is present."""
    print_service_status(service_status)
    active_pattern = re.compile(r".*Active:\s+active.*")
    if not any(re.match(active_pattern, line) for line in service_status):
        raise Exception('walinuxagent service was not active')
    print("\tVerified the agent service status is correct!\n")
def verify_agent_cgroup_assigned_correctly_filesystem():
    """Verify via the cgroup filesystem that the daemon and the agent sit
    in the expected cgroup and that no unexpected PIDs share it."""
    print("===== Verifying the daemon and the agent are assigned to the same correct cgroup using filesystem =====")
    # Locate the daemon and agent by scanning process command lines
    daemon_pid = get_pid_by_cmdline(DAEMON_CMDLINE_PATTERN)
    if daemon_pid == -1:
        raise Exception('daemon PID not found!')
    agent_pid = get_pid_by_cmdline(AGENT_CMDLINE_PATTERN)
    if agent_pid == -1:
        raise Exception('agent PID not found!')
    # Both processes must be assigned to the (same) expected cgroup
    check_cgroup_for_agent_process("daemon", daemon_pid)
    check_cgroup_for_agent_process("agent", agent_pid)
    # The agent cgroup must not contain any other processes
    for controller in CONTROLLERS:
        # The mount path is absolute (/system.slice/walinuxagent.service
        # or /WALinuxAgent/WALinuxAgent); strip the leading "/" so it
        # joins cleanly under the controller directory.
        relative_mount = get_agent_cgroup_mount_path()[1:]
        agent_cgroup_path = os.path.join(BASE_CGROUP, controller, relative_mount)
        agent_cgroup_procs = os.path.join(agent_cgroup_path, 'cgroup.procs')
        # We expect only the daemon and extension handler processes here.
        # Extra short-lived extension handler PIDs can appear because the
        # process doubles before forking; check_pids_in_agent_cgroup still
        # raises in that case so such occurrences are tracked.
        check_pids_in_agent_cgroup(agent_cgroup_procs, daemon_pid, agent_pid)
        print('\tVerified the daemon and agent are assigned to the same correct cgroup {0}'.format(agent_cgroup_path))
    print("")
def verify_agent_cgroup_assigned_correctly():
    """Verify the agent's cgroup assignment, via systemd when available
    and via direct cgroup-filesystem inspection otherwise."""
    if not is_systemd_distro():
        verify_agent_cgroup_assigned_correctly_filesystem()
        return
    print("===== Verifying the daemon and the agent are assigned to the same correct cgroup using systemd =====")
    status = run_get_output("systemctl status walinuxagent")
    verify_agent_cgroup_assigned_correctly_systemd(status)
| |
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from warnings import warn
from numpy import array, ones, zeros, Inf, r_, c_, concatenate, shape
from numpy import flatnonzero as find
from scipy.sparse import spdiags, hstack, vstack, csr_matrix as sparse
from scipy.sparse import eye as speye
from pypower import idx_dcline
from pypower.add_userfcn import add_userfcn
from pypower.remove_userfcn import remove_userfcn
from pypower.isload import isload
from pypower.idx_gen import MBASE, GEN_STATUS, PMIN, PMAX, GEN_BUS, PG, QG, \
VG, QMIN, QMAX, MU_QMIN, MU_PMAX, MU_PMIN, MU_QMAX
from pypower.idx_bus import BUS_TYPE, REF, PV
from pypower.idx_cost import MODEL, POLYNOMIAL, NCOST
def toggle_dcline(ppc, on_off):
    """Enable or disable DC line modeling.

    Enables or disables a set of OPF userfcn callbacks to implement
    DC lines as a pair of linked generators. While it uses the OPF
    extension mechanism, this implementation works for simple power
    flow as well as OPF problems.

    These callbacks expect to find a 'dcline' field in the input MPC,
    where MPC.dcline is an ndc x 17 matrix with columns as defined
    in IDX_DCLINE, where ndc is the number of DC lines.

    The 'int2ext' callback also packages up flow results and stores them
    in appropriate columns of MPC.dcline.

    NOTE: Because of the way this extension modifies the number of
    rows in the gen and gencost matrices, caution must be taken
    when using it with other extensions that deal with generators.

    Examples:
        ppc = loadcase('t_case9_dcline')
        ppc = toggle_dcline(ppc, 'on')
        results1 = runpf(ppc)
        results2 = runopf(ppc)

    @param ppc: PYPOWER case dict
    @param on_off: 'on' to install the callbacks, 'off' to remove them
    @return: the (modified) case dict
    @raise ValueError: on a malformed 'dcline'/'dclinecost' field or an
        invalid C{on_off} value

    @see: L{idx_dcline}, L{add_userfcn}, L{remove_userfcn}, L{run_userfcn}.
    """
    if on_off == 'on':
        ## define named indices into data matrices
        c = idx_dcline.c

        ## check for proper input data
        if 'dcline' not in ppc or ppc['dcline'].shape[1] < c["LOSS1"] + 1:
            ## BUG FIX: the width was previously passed as a second positional
            ## argument to ValueError instead of being interpolated
            raise ValueError('toggle_dcline: case must contain a '
                    '\'dcline\' field, an ndc x %d matrix.' % c["LOSS1"])

        if 'dclinecost' in ppc and ppc['dcline'].shape[0] != ppc['dclinecost'].shape[0]:
            raise ValueError('toggle_dcline: number of rows in \'dcline\''
                    ' field (%d) and \'dclinecost\' field (%d) do not match.' %
                (ppc['dcline'].shape[0], ppc['dclinecost'].shape[0]))

        k = find(ppc['dcline'][:, c["LOSS1"]] < 0)
        if len(k) > 0:
            ## BUG FIX: emit one warning per offending line; the original
            ## interpolated a whole 2-column sub-array into a single
            ## '%d to %d' pair, which raises TypeError for multiple matches
            for i in k:
                warn('toggle_dcline: linear loss term is negative for DC line '
                     'from bus %d to %d\n' %
                     (ppc['dcline'][i, c['F_BUS']], ppc['dcline'][i, c['T_BUS']]))

        ## add callback functions
        ## note: assumes all necessary data included in 1st arg (ppc, om, results)
        ##       so, no additional explicit args are needed
        ppc = add_userfcn(ppc, 'ext2int', userfcn_dcline_ext2int)
        ppc = add_userfcn(ppc, 'formulation', userfcn_dcline_formulation)
        ppc = add_userfcn(ppc, 'int2ext', userfcn_dcline_int2ext)
        ppc = add_userfcn(ppc, 'printpf', userfcn_dcline_printpf)
        ppc = add_userfcn(ppc, 'savecase', userfcn_dcline_savecase)
    elif on_off == 'off':
        ppc = remove_userfcn(ppc, 'savecase', userfcn_dcline_savecase)
        ppc = remove_userfcn(ppc, 'printpf', userfcn_dcline_printpf)
        ppc = remove_userfcn(ppc, 'int2ext', userfcn_dcline_int2ext)
        ppc = remove_userfcn(ppc, 'formulation', userfcn_dcline_formulation)
        ppc = remove_userfcn(ppc, 'ext2int', userfcn_dcline_ext2int)
    else:
        raise ValueError('toggle_dcline: 2nd argument must be either '
                         '\'on\' or \'off\'')

    return ppc
##----- ext2int ------------------------------------------------------
def userfcn_dcline_ext2int(ppc, args):
    """This is the 'ext2int' stage userfcn callback that prepares the input
    data for the formulation stage. It expects to find a 'dcline' field
    in ppc as described above. The optional args are not currently used.
    It adds two dummy generators for each in-service DC line, with the
    appropriate upper and lower generation bounds and corresponding
    zero-cost entries in gencost.
    """
    c = idx_dcline.c

    ## initialize some things
    havecost = 'dclinecost' in ppc

    ## save version with external indexing
    ppc['order']['ext']['dcline'] = ppc['dcline']              ## external indexing
    if havecost:
        ppc['order']['ext']['dclinecost'] = ppc['dclinecost']  ## external indexing

    ppc['order']['ext']['status'] = {}
    ## work with only in-service DC lines
    ppc['order']['ext']['status']['on'] = find(ppc['dcline'][:, c['BR_STATUS']] > 0)
    ppc['order']['ext']['status']['off'] = find(ppc['dcline'][:, c['BR_STATUS']] <= 0)

    ## remove out-of-service DC lines
    dc = ppc['dcline'][ppc['order']['ext']['status']['on'], :] ## only in-service DC lines
    if havecost:
        dcc = ppc['dclinecost'][ppc['order']['ext']['status']['on'], :] ## only in-service DC lines
        ppc['dclinecost'] = dcc

    ndc = dc.shape[0]  ## number of in-service DC lines
    o = ppc['order']

    ##----- convert stuff to internal indexing -----
    ## BUG FIX: bus numbers are stored as floats in the matrix; numpy
    ## requires integer arrays for indexing the e2i permutation
    dc[:, c['F_BUS']] = o['bus']['e2i'][dc[:, c['F_BUS']].astype(int)]
    dc[:, c['T_BUS']] = o['bus']['e2i'][dc[:, c['T_BUS']].astype(int)]
    ppc['dcline'] = dc

    ##----- create gens to represent DC line terminals -----
    ## ensure consistency of initial values of PF, PT and losses
    ## (for simple power flow cases)
    dc[:, c['PT']] = dc[:, c['PF']] - (dc[:, c['LOSS0']] + dc[:, c['LOSS1']] * dc[:, c['PF']])

    ## create gens
    fg = zeros((ndc, ppc['gen'].shape[1]))
    fg[:, MBASE] = 100
    fg[:, GEN_STATUS] = dc[:, c['BR_STATUS']]   ## status (should be all 1's)
    fg[:, PMIN] = -Inf
    fg[:, PMAX] = Inf
    tg = fg.copy()
    fg[:, GEN_BUS] = dc[:, c['F_BUS']]          ## from bus
    tg[:, GEN_BUS] = dc[:, c['T_BUS']]          ## to bus
    fg[:, PG] = -dc[:, c['PF']]                 ## flow (extracted at "from")
    tg[:, PG] = dc[:, c['PT']]                  ## flow (injected at "to")
    fg[:, QG] = dc[:, c['QF']]                  ## VAr injection at "from"
    tg[:, QG] = dc[:, c['QT']]                  ## VAr injection at "to"
    fg[:, VG] = dc[:, c['VF']]                  ## voltage set-point at "from"
    tg[:, VG] = dc[:, c['VT']]                  ## voltage set-point at "to"
    k = find(dc[:, c['PMIN']] >= 0)             ## min positive direction flow
    if len(k) > 0:                              ## constrain at "from" end
        fg[k, PMAX] = -dc[k, c['PMIN']]         ## "from" extraction lower lim

    k = find(dc[:, c['PMAX']] >= 0)             ## max positive direction flow
    if len(k) > 0:                              ## constrain at "from" end
        fg[k, PMIN] = -dc[k, c['PMAX']]         ## "from" extraction upper lim

    k = find(dc[:, c['PMIN']] < 0)              ## max negative direction flow
    if len(k) > 0:                              ## constrain at "to" end
        tg[k, PMIN] = dc[k, c['PMIN']]          ## "to" injection lower lim

    k = find(dc[:, c['PMAX']] < 0)              ## min negative direction flow
    if len(k) > 0:                              ## constrain at "to" end
        tg[k, PMAX] = dc[k, c['PMAX']]          ## "to" injection upper lim

    fg[:, QMIN] = dc[:, c['QMINF']]             ## "from" VAr injection lower lim
    fg[:, QMAX] = dc[:, c['QMAXF']]             ## "from" VAr injection upper lim
    tg[:, QMIN] = dc[:, c['QMINT']]             ## "to" VAr injection lower lim
    tg[:, QMAX] = dc[:, c['QMAXT']]             ## "to" VAr injection upper lim

    ## fudge PMAX a bit if necessary to avoid triggering
    ## dispatchable load constant power factor constraints
    fg[isload(fg), PMAX] = -1e-6
    tg[isload(tg), PMAX] = -1e-6

    ## set all terminal buses to PV (except ref bus)
    ## BUG FIX: cast float bus numbers to int before using them as indices
    refbus = find(ppc['bus'][:, BUS_TYPE] == REF)
    ppc['bus'][dc[:, c['F_BUS']].astype(int), BUS_TYPE] = PV
    ppc['bus'][dc[:, c['T_BUS']].astype(int), BUS_TYPE] = PV
    ppc['bus'][refbus, BUS_TYPE] = REF

    ## append dummy gens
    ppc['gen'] = r_[ppc['gen'], fg, tg]

    ## gencost
    if 'gencost' in ppc and len(ppc['gencost']) > 0:
        ngcr, ngcc = ppc['gencost'].shape  ## dimensions of gencost
        if havecost:         ## user has provided costs
            ndccc = dcc.shape[1]           ## number of dclinecost columns
            ccc = max(r_[ngcc, ndccc])     ## number of columns in new gencost
            if ccc > ngcc:                 ## right zero-pad gencost
                ## BUG FIX: was `ppc.gencost` (attribute access on a dict)
                ## and zeros() was given two scalars instead of a shape tuple
                ppc['gencost'] = c_[ppc['gencost'], zeros((ngcr, ccc - ngcc))]

            ## flip function across vertical axis and append to gencost
            ## (PF for DC line = -PG for dummy gen at "from" bus)
            ## NOTE(review): index windows below follow the MATPOWER
            ## toggle_dcline.m reference (0-based: nc values after NCOST);
            ## the previous `NCOST + range(...)` raised TypeError in Py3
            for k in range(ndc):
                nc = int(dcc[k, NCOST])
                if dcc[k, MODEL] == POLYNOMIAL:
                    ## nc polynomial coefficients follow the NCOST column
                    temp = dcc[k, NCOST + 1:NCOST + nc + 1].copy()
                    ## flip sign on coefficients of odd terms
                    ## (every other starting with linear term,
                    ##  that is, the next to last one)
                    if nc > 1:
                        temp[nc - 2::-2] = -temp[nc - 2::-2]
                else:  ## dcc[k, MODEL] == PW_LINEAR
                    ## 2*nc piecewise-linear (x, y) pairs follow NCOST
                    temp = dcc[k, NCOST + 1:NCOST + 2 * nc + 1].copy()
                    ## switch sign on horizontal coordinate and re-sort
                    xx = -temp[0::2]
                    yy = temp[1::2]
                    temp[0::2] = xx[::-1]
                    temp[1::2] = yy[::-1]

                padding = zeros(ccc - (NCOST + 1) - len(temp))
                ## BUG FIX: build the row with r_ (1-D concatenation) and
                ## append it as a row; c_ column-stacks 1-D pieces
                gck = r_[dcc[k, :NCOST + 1], temp, padding].reshape(1, -1)
                ## append to gencost
                ppc['gencost'] = r_[ppc['gencost'], gck]

            ## use zero cost on "to" end gen (one row per DC line)
            ## BUG FIX: was multiplying by a ragged Python list and
            ## appending columns (c_) instead of rows (r_)
            tgc = ones((ndc, 1)) * r_[array([2, 0, 0, 2]), zeros(ccc - 4)]
            ppc['gencost'] = r_[ppc['gencost'], tgc]
        else:
            ## use zero cost as default
            dcgc = ones((2 * ndc, 1)) * concatenate([array([2, 0, 0, 2]), zeros(ngcc - 4)])
            ppc['gencost'] = r_[ppc['gencost'], dcgc]

    return ppc
##----- formulation --------------------------------------------------
def userfcn_dcline_formulation(om, args):
    """This is the 'formulation' stage userfcn callback that defines the
    user constraints for the dummy generators representing DC lines.
    It expects to find a 'dcline' field in the ppc stored in om, as
    described above. By the time it is passed to this callback,
    MPC.dcline should contain only in-service lines and the from and
    to bus columns should be converted to internal indexing. The
    optional args are not currently used.

    If Pf, Pt and Ploss are the flow at the "from" end, flow at the
    "to" end and loss respectively, and L0 and L1 are the linear loss
    coefficients, then the relationships between them are given by:
        Pf - Ploss = Pt
        Ploss = L0 + L1 * Pf
    If Pgf and Pgt represent the injections of the dummy generators
    representing the DC line injections into the network, then
    Pgf = -Pf and Pgt = Pt, and we can combine all of the above to
    get the following constraint on Pgf and Pgt:
        -Pgf - (L0 - L1 * Pgf) = Pgt
    which can be written:
        -L0 <= (1 - L1) * Pgf + Pgt <= -L0
    """
    ## define named indices into data matrices
    c = idx_dcline.c

    ## initialize some things
    ppc = om.get_ppc()
    dc = ppc['dcline']
    ndc = dc.shape[0]  ## number of in-service DC lines
    ng = ppc['gen'].shape[0] - 2 * ndc  ## number of original gens/disp loads

    ## constraints
    ## nL0 is the (negated) constant loss term, scaled to p.u.
    nL0 = -dc[:, c['LOSS0']] / ppc['baseMVA']
    L1 = dc[:, c['LOSS1']]
    ## one equality row per DC line over [Pg_orig | Pgf | Pgt]:
    ## zero block for original gens, (1-L1) on the "from" dummy gen,
    ## identity on the "to" dummy gen
    Adc = hstack([sparse((ndc, ng)), spdiags(1-L1, 0, ndc, ndc), speye(ndc, ndc)], format="csr")

    ## add them to the model (equality: lower bound == upper bound == nL0)
    om = om.add_constraints('dcline', Adc, nL0, nL0, ['Pg'])

    return om
##----- int2ext ------------------------------------------------------
def userfcn_dcline_int2ext(results, args):
    """This is the 'int2ext' stage userfcn callback that converts everything
    back to external indexing and packages up the results. It expects to
    find a 'dcline' field in the results struct as described for ppc
    above. It also expects that the last 2*ndc entries in the gen and
    gencost matrices correspond to the in-service DC lines (where ndc is
    the number of rows in MPC.dcline). These extra rows are removed from
    gen and gencost and the flow is taken from the PG of these gens and
    placed in the flow column of the appropriate dcline row. The
    optional args are not currently used.
    """
    c = idx_dcline.c

    ## initialize some things
    o = results['order']
    ## k indexes the in-service rows of the (external) dcline matrix
    k = find(o['ext']['dcline'][:, c['BR_STATUS']])
    ndc = len(k)  ## number of in-service DC lines
    ng = results['gen'].shape[0] - 2*ndc; ## number of original gens/disp loads

    ## extract dummy gens (the "from" block then the "to" block)
    fg = results['gen'][ng:ng + ndc, :]
    tg = results['gen'][ng + ndc:ng + 2 * ndc, :]

    ## remove dummy gens
    #results['gen'] = results['gen'][:ng + 1, :]
    #results['gencost'] = results['gencost'][:ng + 1, :]
    results['gen'] = results['gen'][:ng, :]
    results['gencost'] = results['gencost'][:ng, :]

    ## get the solved flows (PF = -PG of "from" dummy gen, PT = PG of "to")
    results['dcline'][:, c['PF']] = -fg[:, PG]
    results['dcline'][:, c['PT']] = tg[:, PG]
    results['dcline'][:, c['QF']] = fg[:, QG]
    results['dcline'][:, c['QT']] = tg[:, QG]
    results['dcline'][:, c['VF']] = fg[:, VG]
    results['dcline'][:, c['VT']] = tg[:, VG]
    ## if the gen matrix carries multiplier columns, copy the shadow
    ## prices onto six extra dcline columns
    if fg.shape[1] >= MU_QMIN:
        results['dcline'] = c_[results['dcline'], zeros((ndc, 6))]
        results['dcline'][:, c['MU_PMIN'] ] = fg[:, MU_PMAX] + tg[:, MU_PMIN]
        results['dcline'][:, c['MU_PMAX'] ] = fg[:, MU_PMIN] + tg[:, MU_PMAX]
        results['dcline'][:, c['MU_QMINF']] = fg[:, MU_QMIN]
        results['dcline'][:, c['MU_QMAXF']] = fg[:, MU_QMAX]
        results['dcline'][:, c['MU_QMINT']] = tg[:, MU_QMIN]
        results['dcline'][:, c['MU_QMAXT']] = tg[:, MU_QMAX]

    results['order']['int'] = {}
    ##----- convert stuff back to external indexing -----
    results['order']['int']['dcline'] = results['dcline'] ## save internal version

    ## copy results to external version
    o['ext']['dcline'][k, c['PF']:c['VT'] + 1] = results['dcline'][:, c['PF']:c['VT'] + 1]
    if results['dcline'].shape[1] == c['MU_QMAXT'] + 1:
        ## NOTE(review): pads the external matrix with ndc rows of zeros;
        ## if any DC lines were out of service the external matrix has more
        ## than ndc rows, so this width/row count looks suspect -- verify
        o['ext']['dcline'] = c_[o['ext']['dcline'], zeros((ndc, 6))]
        o['ext']['dcline'][k, c['MU_PMIN']:c['MU_QMAXT'] + 1] = \
                results['dcline'][:, c['MU_PMIN']:c['MU_QMAXT'] + 1]

    results['dcline'] = o['ext']['dcline'] ## use external version

    return results
##----- printpf ------------------------------------------------------
def userfcn_dcline_printpf(results, fd, ppopt, args):
    """This is the 'printpf' stage userfcn callback that pretty-prints the
    results. It expects a results struct, a file descriptor and a MATPOWER
    options vector. The optional args are not currently used.
    """
    ## define named indices into data matrices
    c = idx_dcline.c

    ## options
    OUT_ALL = ppopt['OUT_ALL']
    OUT_BRANCH = OUT_ALL == 1 or (OUT_ALL == -1 and ppopt['OUT_BRANCH'])
    if OUT_ALL == -1:
        OUT_ALL_LIM = ppopt['OUT_ALL_LIM']
    elif OUT_ALL == 1:
        OUT_ALL_LIM = 2
    else:
        OUT_ALL_LIM = 0

    if OUT_ALL_LIM == -1:
        OUT_LINE_LIM = ppopt['OUT_LINE_LIM']
    else:
        OUT_LINE_LIM = OUT_ALL_LIM

    ctol = ppopt['OPF_VIOLATION']  ## constraint violation tolerance
    ptol = 1e-4                    ## tolerance for displaying shadow prices

    ##----- print results -----
    dc = results['dcline']
    ndc = dc.shape[0]
    ## kk: in-service DC lines (used for the constraint summary tests)
    kk = find(dc[:, c['BR_STATUS']] != 0)
    if OUT_BRANCH:
        fd.write('\n================================================================================')
        fd.write('\n| DC Line Data |')
        fd.write('\n================================================================================')
        fd.write('\n Line From To Power Flow Loss Reactive Inj (MVAr)')
        fd.write('\n # Bus Bus From (MW) To (MW) (MW) From To ')
        fd.write('\n------ ------ ------ --------- --------- --------- --------- ---------')
        loss = 0
        for k in range(ndc):
            if dc[k, c['BR_STATUS']]:  ## status on
                fd.write('\n{0:5.0f}{1:8.0f}{2:8.0f}{3:11.2f}{4:11.2f}{5:11.2f}{6:11.2f}{7:11.2f}'.format(*r_[k, dc[k, c['F_BUS']:c['T_BUS'] + 1], dc[k, c['PF']:c['PT'] + 1],dc[k, c['PF']] - dc[k, c['PT']], dc[k, c['QF']:c['QT'] + 1]]))

                loss = loss + dc[k, c['PF']] - dc[k, c['PT']]
            else:
                ## BUG FIX: the bus sub-array was interpolated as a single
                ## argument for two %d specs (TypeError); format the two
                ## bus numbers individually
                fd.write('\n%5d%8d%8d%11s%11s%11s%11s%11s' %
                         (k, dc[k, c['F_BUS']], dc[k, c['T_BUS']],
                          '- ', '- ', '- ', '- ', '- '))

        fd.write('\n ---------')
        fd.write('\n Total:{0:11.2f}\n'.format(loss))

    if OUT_LINE_LIM == 2 or (OUT_LINE_LIM == 1 and
                         (any(dc[kk, c['PF']] > dc[kk, c['PMAX']] - ctol) or
                          any(dc[kk, c['MU_PMIN']] > ptol) or
                          any(dc[kk, c['MU_PMAX']] > ptol))):
        fd.write('\n================================================================================')
        fd.write('\n| DC Line Constraints |')
        fd.write('\n================================================================================')
        fd.write('\n Line From To Minimum Actual Flow Maximum')
        fd.write('\n # Bus Bus Pmin mu Pmin (MW) Pmax Pmax mu ')
        fd.write('\n------ ------ ------ --------- --------- --------- --------- ---------')
        for k in range(ndc):
            if OUT_LINE_LIM == 2 or (OUT_LINE_LIM == 1 and
                        (dc[k, c['PF']] > dc[k, c['PMAX']] - ctol or
                         dc[k, c['MU_PMIN']] > ptol or
                         dc[k, c['MU_PMAX']] > ptol)):
                if dc[k, c['BR_STATUS']]:   ## status on
                    fd.write('\n{0:5.0f}{1:8.0f}{2:8.0f}'.format(*r_[k, dc[k, c['F_BUS']:c['T_BUS'] + 1]]))
                    #fd.write('\n%5d%8d%8d' % (k + 1, dc[k, c['F_BUS']:c['T_BUS'] + 1] ))
                    if dc[k, c['MU_PMIN']] > ptol:
                        fd.write('{0:11.3f}'.format(dc[k, c['MU_PMIN']]) )
                    else:
                        fd.write('%11s' % ('- '))

                    fd.write('{0:11.2f}{1:11.2f}{2:11.2f}' \
                            .format(*r_[dc[k, c['PMIN']], dc[k, c['PF']], dc[k, c['PMAX']]]))
                    if dc[k, c['MU_PMAX']] > ptol:
                        fd.write('{0:11.3f}'.format(dc[k, c['MU_PMAX']]))
                    else:
                        fd.write('%11s' % ('- '))
                else:
                    ## BUG FIX: same array-into-two-%d defect as above
                    fd.write('\n%5d%8d%8d%11s%11s%11s%11s%11s' %
                             (k, dc[k, c['F_BUS']], dc[k, c['T_BUS']],
                              '- ', '- ', '- ', '- ', '- '))

        fd.write('\n')

    return results
##----- savecase -----------------------------------------------------
def userfcn_dcline_savecase(ppc, fd, prefix, args):
    """This is the 'savecase' stage userfcn callback that prints the Py-file
    code to save the 'dcline' field in the case file. It expects a
    PYPOWER case dict (ppc), a file descriptor and variable prefix
    (usually 'ppc.'). The optional args are not currently used.
    """
    ## define named indices into data matrices
    c = idx_dcline.c

    ## save it
    ncols = ppc['dcline'].shape[1]
    fd.write('\n####----- DC Line Data -----####\n')
    ## NOTE(review): c holds 0-based column indices, so a matrix that
    ## includes the multiplier columns has MU_QMAXT + 1 columns; the
    ## original compared against MU_QMAXT, off by one
    if ncols < c['MU_QMAXT'] + 1:
        fd.write('##\tfbus\ttbus\tstatus\tPf\tPt\tQf\tQt\tVf\tVt\tPmin\tPmax\tQminF\tQmaxF\tQminT\tQmaxT\tloss0\tloss1\n')
    else:
        fd.write('##\tfbus\ttbus\tstatus\tPf\tPt\tQf\tQt\tVf\tVt\tPmin\tPmax\tQminF\tQmaxF\tQminT\tQmaxT\tloss0\tloss1\tmuPmin\tmuPmax\tmuQminF\tmuQmaxF\tmuQminT\tmuQmaxT\n')

    template = '\t%d\t%d\t%d\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g\t%.9g'
    if ncols == c['MU_QMAXT'] + 1:
        ## BUG FIX: extend the format string; the original wrapped it in a
        ## list, making the later `template + ';\n'` a TypeError
        template = template + '\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f'
    template = template + ';\n'

    fd.write('%sdcline = [\n' % prefix)
    ## BUG FIX: file.write() takes a single string (the original passed the
    ## data matrix as a second argument, MATLAB fprintf style); emit one
    ## formatted line per DC line row instead
    for row in ppc['dcline']:
        fd.write(template % tuple(row))
    fd.write('];\n')

    return ppc
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base
# URL path prefix under which all LBaaS API resources are exposed
LOADBALANCER_PREFIX = "/lb"
# Loadbalancer Exceptions
class DelayOrTimeoutInvalid(nexception.BadRequest):
    """Raised when a health monitor's delay is smaller than its timeout."""
    message = _("Delay must be greater than or equal to timeout")
class NoEligibleBackend(nexception.NotFound):
    """Raised when a pool has no backend able to serve a request."""
    message = _("No eligible backend for pool %(pool_id)s")
class VipNotFound(nexception.NotFound):
    """Raised when the requested VIP id does not exist."""
    message = _("Vip %(vip_id)s could not be found")
class VipExists(nexception.NeutronException):
    """Raised when creating a second VIP for a pool that already has one."""
    message = _("Another Vip already exists for pool %(pool_id)s")
class PoolNotFound(nexception.NotFound):
    """Raised when the requested pool id does not exist."""
    message = _("Pool %(pool_id)s could not be found")
class MemberNotFound(nexception.NotFound):
    """Raised when the requested pool member id does not exist."""
    message = _("Member %(member_id)s could not be found")
class HealthMonitorNotFound(nexception.NotFound):
    """Raised when the requested health monitor id does not exist."""
    message = _("Health_monitor %(monitor_id)s could not be found")
class PoolMonitorAssociationNotFound(nexception.NotFound):
    """Raised when a monitor is not associated with the given pool."""
    message = _("Monitor %(monitor_id)s is not associated "
                "with Pool %(pool_id)s")
class PoolMonitorAssociationExists(nexception.Conflict):
    """Raised when associating a monitor that is already on the pool."""
    message = _('health_monitor %(monitor_id)s is already associated '
                'with pool %(pool_id)s')
class StateInvalid(nexception.NeutronException):
    """Raised when an LBaaS resource is in a state that forbids the operation."""
    message = _("Invalid state %(state)s of Loadbalancer resource %(id)s")
class PoolInUse(nexception.InUse):
    """Raised when deleting a pool that is still referenced."""
    message = _("Pool %(pool_id)s is still in use")
class HealthMonitorInUse(nexception.InUse):
    """Raised when deleting a health monitor still associated with pools."""
    message = _("Health monitor %(monitor_id)s still has associations with "
                "pools")
class PoolStatsNotFound(nexception.NotFound):
    """Raised when statistics for the given pool cannot be found."""
    message = _("Statistics of Pool %(pool_id)s could not be found")
class ProtocolMismatch(nexception.BadRequest):
    """Raised when a VIP's protocol differs from its pool's protocol."""
    message = _("Protocol %(vip_proto)s does not match "
                "pool protocol %(pool_proto)s")
class MemberExists(nexception.NeutronException):
    """Raised when adding a member whose address:port is already in the pool."""
    message = _("Member with address %(address)s and port %(port)s "
                "already present in pool %(pool)s")
RESOURCE_ATTRIBUTE_MAP = {
'vips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'address': {'allow_post': True, 'allow_put': False,
'default': attr.ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'port_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'protocol_port': {'allow_post': True, 'allow_put': False,
'validate': {'type:range': [0, 65535]},
'convert_to': attr.convert_to_int,
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'pool_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid': None},
'is_visible': True},
'session_persistence': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'type': {'type:values': ['APP_COOKIE',
'HTTP_COOKIE',
'SOURCE_IP'],
'required': True},
'cookie_name': {'type:string': None,
'required': False}}},
'is_visible': True},
'connection_limit': {'allow_post': True, 'allow_put': True,
'default': -1,
'convert_to': attr.convert_to_int,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'pools': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'vip_id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'provider': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
'lb_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:values': ['ROUND_ROBIN',
'LEAST_CONNECTIONS',
'SOURCE_IP']},
'is_visible': True},
'members': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'health_monitors': {'allow_post': True, 'allow_put': True,
'default': None,
'validate': {'type:uuid_list': None},
'convert_to': attr.convert_to_list,
'is_visible': True},
'health_monitors_status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'members': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'pool_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid': None},
'is_visible': True},
'address': {'allow_post': True, 'allow_put': False,
'validate': {'type:ip_address': None},
'is_visible': True},
'protocol_port': {'allow_post': True, 'allow_put': False,
'validate': {'type:range': [0, 65535]},
'convert_to': attr.convert_to_int,
'is_visible': True},
'weight': {'allow_post': True, 'allow_put': True,
'default': 1,
'validate': {'type:range': [0, 256]},
'convert_to': attr.convert_to_int,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'health_monitors': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'type': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['PING', 'TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'delay': {'allow_post': True, 'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': attr.convert_to_int,
'is_visible': True},
'timeout': {'allow_post': True, 'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': attr.convert_to_int,
'is_visible': True},
'max_retries': {'allow_post': True, 'allow_put': True,
'validate': {'type:range': [1, 10]},
'convert_to': attr.convert_to_int,
'is_visible': True},
'http_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': 'GET',
'is_visible': True},
'url_path': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '/',
'is_visible': True},
'expected_codes': {'allow_post': True, 'allow_put': True,
'validate': {
'type:regex':
r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'},
'default': '200',
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'pools': {'allow_post': False, 'allow_put': False,
'is_visible': True}
}
}
# Sub-resources nested under a parent collection; this exposes
# /lb/pools/{pool_id}/health_monitors for pool<->monitor associations.
SUB_RESOURCE_ATTRIBUTE_MAP = {
    'health_monitors': {
        'parent': {'collection_name': 'pools',
                   'member_name': 'pool'},
        'parameters': {'id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True},
                       'tenant_id': {'allow_post': True, 'allow_put': False,
                                     'validate': {'type:string': None},
                                     'required_by_policy': True,
                                     'is_visible': True},
                       }
    }
}
# Per-tenant quota options for LBaaS resources, registered under the
# [QUOTAS] configuration section. A negative value means unlimited.
lbaas_quota_opts = [
    cfg.IntOpt('quota_vip',
               default=10,
               help=_('Number of vips allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_pool',
               default=10,
               help=_('Number of pools allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_member',
               default=-1,
               help=_('Number of pool members allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_health_monitor',
               default=-1,
               help=_('Number of health monitors allowed per tenant. '
                      'A negative value means unlimited.'))
]
cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS')
class Loadbalancer(extensions.ExtensionDescriptor):
    """API extension descriptor for the LBaaS (v1) service.

    Wires the vips/pools/members/health_monitors resources defined in
    RESOURCE_ATTRIBUTE_MAP (and the pool sub-resources from
    SUB_RESOURCE_ATTRIBUTE_MAP) into Neutron's extension framework.
    """

    @classmethod
    def get_name(cls):
        return "LoadBalancing service"

    @classmethod
    def get_alias(cls):
        return "lbaas"

    @classmethod
    def get_description(cls):
        return "Extension for LoadBalancing service"

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/neutron/LBaaS/API_1.0"

    @classmethod
    def get_updated(cls):
        return "2012-10-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build the extension's API resources and controllers.

        Top-level collections come from RESOURCE_ATTRIBUTE_MAP; each entry
        in SUB_RESOURCE_ATTRIBUTE_MAP is exposed nested under its parent
        collection (prefixed with LOADBALANCER_PREFIX).
        """
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        # 'health_monitors_status' does not follow the default 's'-stripping
        # singularization, so map it explicitly
        plural_mappings['health_monitors_status'] = 'health_monitor_status'
        attr.PLURALS.update(plural_mappings)
        action_map = {'pool': {'stats': 'GET'}}
        resources = resource_helper.build_resource_info(plural_mappings,
                                                        RESOURCE_ATTRIBUTE_MAP,
                                                        constants.LOADBALANCER,
                                                        action_map=action_map,
                                                        register_quota=True)
        plugin = manager.NeutronManager.get_service_plugins()[
            constants.LOADBALANCER]
        for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
            # Special handling needed for sub-resources with 'y' ending
            # (e.g. proxies -> proxy)
            resource_name = collection_name[:-1]
            parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
            params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
                'parameters')

            controller = base.create_resource(collection_name, resource_name,
                                              plugin, params,
                                              allow_bulk=True,
                                              parent=parent)

            resource = extensions.ResourceExtension(
                collection_name,
                controller, parent,
                path_prefix=LOADBALANCER_PREFIX,
                attr_map=params)
            resources.append(resource)

        return resources

    @classmethod
    def get_plugin_interface(cls):
        return LoadBalancerPluginBase

    def update_attributes_map(self, attributes, extension_attrs_map=None):
        super(Loadbalancer, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Attribute map applies only to the v2.0 API
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
@six.add_metaclass(abc.ABCMeta)
class LoadBalancerPluginBase(service_base.ServicePluginBase):
    """Abstract interface that concrete LBaaS v1 service plugins implement.

    Defines CRUD for vips, pools, members and health monitors, pool
    statistics, and the pool <-> health monitor association operations.
    """

    def get_plugin_name(self):
        return constants.LOADBALANCER

    def get_plugin_type(self):
        return constants.LOADBALANCER

    def get_plugin_description(self):
        return 'LoadBalancer service plugin'

    # --- VIP operations ---
    @abc.abstractmethod
    def get_vips(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_vip(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_vip(self, context, vip):
        pass

    @abc.abstractmethod
    def update_vip(self, context, id, vip):
        pass

    @abc.abstractmethod
    def delete_vip(self, context, id):
        pass

    # --- Pool operations ---
    @abc.abstractmethod
    def get_pools(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_pool(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_pool(self, context, pool):
        pass

    @abc.abstractmethod
    def update_pool(self, context, id, pool):
        pass

    @abc.abstractmethod
    def delete_pool(self, context, id):
        pass

    @abc.abstractmethod
    def stats(self, context, pool_id):
        pass

    # --- Pool <-> health monitor association operations ---
    @abc.abstractmethod
    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        pass

    @abc.abstractmethod
    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        pass

    @abc.abstractmethod
    def delete_pool_health_monitor(self, context, id, pool_id):
        pass

    # --- Member operations ---
    @abc.abstractmethod
    def get_members(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_member(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_member(self, context, member):
        pass

    @abc.abstractmethod
    def update_member(self, context, id, member):
        pass

    @abc.abstractmethod
    def delete_member(self, context, id):
        pass

    # --- Health monitor operations ---
    @abc.abstractmethod
    def get_health_monitors(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_health_monitor(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_health_monitor(self, context, health_monitor):
        pass

    @abc.abstractmethod
    def update_health_monitor(self, context, id, health_monitor):
        pass

    @abc.abstractmethod
    def delete_health_monitor(self, context, id):
        pass
| |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Add audit permissions.
Revision ID: 904377398db
Revises: 4838619603a
Create Date: 2013-10-28 17:30:26.084569
"""
# revision identifiers, used by Alembic.
revision = '904377398db'
down_revision = '4838619603a'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
# Lightweight table handle for issuing UPDATEs against 'roles' from this
# migration (no ORM model dependency).
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.Text),
    column('description', sa.Text),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
    column('scope', sa.String),
    )

# Table handle for 'contexts'; declared here for use by this migration.
contexts_table = table('contexts',
    column('id', sa.Integer),
    column('name', sa.String),
    column('description', sa.Text),
    column('related_object_id', sa.Integer),
    column('related_object_type', sa.String),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
    )
def set_permissions(program_editor_objects):
    """Rewrite the permissions_json of the three Program* roles.

    ``program_editor_objects`` is the list of type names a ProgramEditor may
    fully manage.  ProgramOwner additionally gets 'UserRole' everywhere and
    'Audit' for create; ProgramReader only gets read access.  All three rows
    receive the same ``updated_at`` timestamp.
    """
    editor_objects = list(program_editor_objects)
    owner_objects = editor_objects + ['UserRole']
    owner_create = owner_objects + ['Audit']
    now = datetime.now()

    role_permissions = (
        ('ProgramOwner', {
            'create': owner_create,
            'read': owner_objects,
            'update': owner_objects,
            'delete': owner_objects,
        }),
        ('ProgramEditor', {
            'create': editor_objects,
            'read': editor_objects,
            'update': editor_objects,
            'delete': editor_objects,
        }),
        ('ProgramReader', {
            'create': [],
            'read': editor_objects,
            'update': [],
            'delete': [],
        }),
    )

    for role_name, permissions in role_permissions:
        op.execute(
            roles_table.update()
            .values(
                permissions_json=json.dumps(permissions),
                updated_at=now,
            )
            .where(roles_table.c.name == role_name))
# All application tables at the time of this migration.
# NOTE(review): not referenced in the visible upgrade()/downgrade() code —
# presumably kept for reference or used by a removed step; confirm.
all_tables = [
    'audits',
    'categories',
    'categorizations',
    'contexts',
    'control_controls',
    'control_risks',
    'control_sections',
    'controls',
    'data_assets',
    'directive_controls',
    'directives',
    'documents',
    'events',
    'facilities',
    'helps',
    'markets',
    'meetings',
    'object_controls',
    'object_documents',
    'object_objectives',
    'object_owners',
    'object_people',
    'object_sections',
    'objective_controls',
    'objectives',
    'options',
    'org_groups',
    'people',
    'products',
    'program_controls',
    'program_directives',
    'programs',
    'projects',
    'relationship_types',
    'relationships',
    'requests',
    'responses',
    'risk_risky_attributes',
    'risks',
    'risky_attributes',
    'roles',
    'section_objectives',
    'sections',
    'systems',
    'user_roles',
    ]

# Object type names readable by the AuditorReader role (default context).
reader_objects = [
    'Categorization', 'Category', 'Control', 'ControlControl', 'ControlSection',
    'DataAsset', 'Directive', 'Contract', 'Policy', 'Regulation',
    'Document', 'Facility', 'Help', 'Market', 'Objective',
    'ObjectControl', 'ObjectDocument', 'ObjectObjective',
    'ObjectPerson', 'ObjectSection', 'Option', 'OrgGroup', 'PopulationSample',
    'Product', 'ProgramControl', 'ProgramDirective', 'Project', 'Relationship',
    'RelationshipType', 'Section', 'SystemOrProcess',
    'System', 'Process', 'SystemControl', 'SystemSystem', 'ObjectOwner',
    'Person', 'Program', 'Role',
    ]

# Object type names readable by AuditorProgramReader (private program context).
program_reader_objects = [
    'ObjectDocument', 'ObjectObjective', 'ObjectPerson', 'ObjectSection',
    'Program', 'ProgramControl', 'ProgramDirective', 'Relationship',
    ]

# Types creatable inside an audit context by the program-side audit roles.
audit_create_objects = [
    'Request', 'DocumentationResponse', 'InterviewResponse',
    'PopulationSampleResponse',
    ]

# ProgramAuditOwner additionally manages role grants and the Audit itself.
audit_owner_create = list(audit_create_objects)
audit_owner_create.append('UserRole')
audit_owner_create.append('Audit')

# Read set = create set plus the Audit object itself.
audit_read_objects = list(audit_create_objects)
audit_read_objects.append('Audit')

# Auditor read set: responses are only visible once their status is
# 'Accepted' or 'Completed' (conditional permission entries).
auditor_read_objects = [
    'Audit', 'Request',
    {
        'type': 'DocumentationResponse',
        'condition': 'in',
        'terms': {
            'value': ['Accepted', 'Completed',],
            'property_name': 'status',
            },
        },
    {
        'type': 'InterviewResponse',
        'condition': 'in',
        'terms': {
            'value': ['Accepted', 'Completed',],
            'property_name': 'status',
            },
        },
    {
        'type': 'PopulationSampleResponse',
        'condition': 'in',
        'terms': {
            'value': ['Accepted', 'Completed',],
            'property_name': 'status',
            },
        },
    ]

# Update set mirrors the read set (a copy, so later mutation is isolated).
audit_update_objects = list(audit_read_objects)
def upgrade():
    """Apply the migration.

    1. Tighten the Program* role permissions via set_permissions().
    2. Insert the new audit-related roles (AuditorReader,
       AuditorProgramReader, ProgramAuditOwner/Editor/Reader, Auditor).
    3. Create the ``role_implications`` join table with a uniqueness
       constraint over (source_context, context, source_role, role).
    """
    # drop Cycle, it doesn't exist
    set_permissions([
        'ObjectDocument',
        'ObjectObjective',
        'ObjectPerson',
        'ObjectSection',
        'Program',
        'ProgramControl',
        'ProgramDirective',
        'Relationship',
        ])
    # create join table for inferred permissions
    # define auditor role
    # define program owner privileges for audit context
    # define program editor privileges for audit context
    # define program reader privileges for audit context
    # set_audit_permissions([
    #   'Audit',
    #   'Request',
    #   'Response',
    #   ])
    current_datetime = datetime.now()
    # Seed the new roles.  context_id is NULL at definition time; concrete
    # contexts are attached when the roles are granted.
    op.bulk_insert(roles_table,
        [
          { 'name': 'AuditorReader',
            'description': 'A user with Auditor role for a program audit will '\
                'also have this role in the default object context so that '\
                'the auditor will have access to the objects required to '\
                'perform the audit.',
            'permissions_json': json.dumps({
              'create': [],
              'read': reader_objects,
              'update': [],
              'delete': [],
              }),
            'scope': 'System',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          { 'name': 'AuditorProgramReader',
            'description': 'A user with Auditor role for a program audit will '\
                'also have this role in the program context so that '\
                'the auditor will have access to the private program '\
                'information and mappings required to perform the audit.',
            'permissions_json': json.dumps({
              'create': [],
              'read': program_reader_objects,
              'update': [],
              'delete': [],
              }),
            'scope': 'Private Program Implied',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          { 'name': 'ProgramAuditOwner',
            'description': 'A user with the ProgramOwner role for a private '\
                'program will also have this role in the audit context for any '\
                'audit created for that program.',
            'permissions_json': json.dumps({
              'create': audit_owner_create,
              'read': audit_owner_create,
              'update': audit_update_objects,
              'delete': [],
              }),
            'scope': 'Audit Implied',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          { 'name': 'ProgramAuditEditor',
            'description': 'A user with the ProgramEditor role for a private '\
                'program will also have this role in the audit context for any '\
                'audit created for that program.',
            'permissions_json': json.dumps({
              'create': audit_create_objects,
              'read': audit_read_objects,
              'update': audit_update_objects,
              'delete': [],
              }),
            'scope': 'Audit Implied',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          { 'name': 'ProgramAuditReader',
            'description': 'A user with the ProgramReader role for a private '\
                'program will also have this role in the audit context for any '\
                'audit created for that program.',
            'permissions_json': json.dumps({
              'create': [],
              'read': audit_read_objects,
              'update': [],
              'delete': [],
              }),
            'scope': 'Audit Implied',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          { 'name': 'Auditor',
            'description': 'The permissions required by an auditor to access '\
                'relevant resources for the program being audited.',
            'permissions_json': json.dumps({
              'create': ['Request'],
              'read': auditor_read_objects,
              'update': ['Request', 'Response'], #FIXME add response constraints
              'delete': [],
              }),
            'scope': 'Audit',
            'created_at': current_datetime,
            'updated_at': current_datetime,
            'context_id': None,
            },
          ])
    # Add role implications table
    # Defined within the context of the target so that authorization in the target
    # is a requirement to create the implication.
    op.create_table('role_implications',
        sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
        sa.Column('source_context_id', sa.Integer(), nullable=True),
        sa.Column('context_id', sa.Integer(), nullable=True), #target
        sa.Column('source_role_id', sa.Integer(), nullable=False),
        sa.Column('role_id', sa.Integer(), nullable=False), #target
        sa.Column('modified_by_id', sa.Integer(), nullable=False),
        sa.Column(
            'created_at', sa.DateTime(), default=sa.text('current_timestamp')),
        sa.Column(
            'updated_at',
            sa.DateTime(),
            default=sa.text('current_timestamp'),
            onupdate=sa.text('current_timestamp'),
            ),
        sa.ForeignKeyConstraint(['source_context_id',], ['contexts.id',]),
        sa.ForeignKeyConstraint(['context_id',], ['contexts.id',]),
        sa.ForeignKeyConstraint(['source_role_id',], ['roles.id',]),
        sa.ForeignKeyConstraint(['role_id',], ['roles.id',]),
        )
    # One implication per (source context, target context, source role, role).
    op.create_unique_constraint('uq_role_implications', 'role_implications',
        ['source_context_id', 'context_id', 'source_role_id', 'role_id',])
def downgrade():
    """Revert the migration: drop the role_implications table.

    NOTE(review): the role inserts and permission rewrites performed by
    upgrade() are not undone here.
    """
    op.drop_table('role_implications')
| |
"""
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
import pandas.core.common as com
import pandas.computation.expressions as expressions
from pandas.lib import isscalar
from pandas.tslib import iNaT
from pandas.compat import bind_method
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.core.common import (is_list_like, notnull, isnull,
_values_from_object, _maybe_match_name,
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype, PerformanceWarning, ABCSeries)
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, radd_func, comp_method, bool_method,
                    use_numexpr, special=False, default_axis='columns'):
    """Build the dict of arithmetic/comparison/boolean methods for a class.

    ``arith_method``/``comp_method``/``bool_method`` are factories that turn
    an operator function + name + string representation into a bound-ready
    method.  When ``special`` is True the keys are dunder names
    (``__add__`` etc.); otherwise the flex names (``add`` etc.) are used.
    Returns the mapping of final method name -> method.
    """
    # creates actual methods based upon arithmetic, comp and bool method
    # constructors.

    # NOTE: Only frame cares about default_axis, specifically: special methods
    # have default axis None, whereas flex methods have default axis 'columns'
    # if we're not using numexpr, then don't pass a str_rep
    if use_numexpr:
        op = lambda x: x
    else:
        op = lambda x: None
    if special:

        def names(x):
            # 'and_' -> '__and__' (trailing underscore already present),
            # 'add' -> '__add__'
            if x[-1] == "_":
                return "__%s_" % x
            else:
                return "__%s__" % x
    else:
        names = lambda x: x

    radd_func = radd_func or operator.add

    # Inframe, all special methods have default_axis=None, flex methods have
    # default_axis set to the default (columns)
    # yapf: disable
    new_methods = dict(
        add=arith_method(operator.add, names('add'), op('+'),
                         default_axis=default_axis),
        radd=arith_method(radd_func, names('radd'), op('+'),
                          default_axis=default_axis),
        sub=arith_method(operator.sub, names('sub'), op('-'),
                         default_axis=default_axis),
        mul=arith_method(operator.mul, names('mul'), op('*'),
                         default_axis=default_axis),
        truediv=arith_method(operator.truediv, names('truediv'), op('/'),
                             truediv=True, fill_zeros=np.inf,
                             default_axis=default_axis),
        floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
                              default_axis=default_axis, fill_zeros=np.inf),
        # Causes a floating point exception in the tests when numexpr enabled,
        # so for now no speedup
        mod=arith_method(operator.mod, names('mod'), None,
                         default_axis=default_axis, fill_zeros=np.nan),
        pow=arith_method(operator.pow, names('pow'), op('**'),
                         default_axis=default_axis),
        # not entirely sure why this is necessary, but previously was included
        # so it's here to maintain compatibility
        rmul=arith_method(operator.mul, names('rmul'), op('*'),
                          default_axis=default_axis, reversed=True),
        rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
                          default_axis=default_axis, reversed=True),
        rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
                              names('rtruediv'), op('/'), truediv=True,
                              fill_zeros=np.inf, default_axis=default_axis,
                              reversed=True),
        rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
                               names('rfloordiv'), op('//'),
                               default_axis=default_axis, fill_zeros=np.inf,
                               reversed=True),
        rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
                          default_axis=default_axis, reversed=True),
        rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
                          default_axis=default_axis, fill_zeros=np.nan,
                          reversed=True),)
    # yapf: enable
    # 'div' is an alias of 'truediv' (true division enforced module-wide).
    new_methods['div'] = new_methods['truediv']
    new_methods['rdiv'] = new_methods['rtruediv']

    # Comp methods never had a default axis set
    if comp_method:
        new_methods.update(dict(
            eq=comp_method(operator.eq, names('eq'), op('==')),
            ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
            lt=comp_method(operator.lt, names('lt'), op('<')),
            gt=comp_method(operator.gt, names('gt'), op('>')),
            le=comp_method(operator.le, names('le'), op('<=')),
            ge=comp_method(operator.ge, names('ge'), op('>=')), ))
    if bool_method:
        new_methods.update(
            dict(and_=bool_method(operator.and_, names('and_'), op('&')),
                 or_=bool_method(operator.or_, names('or_'), op('|')),
                 # For some reason ``^`` wasn't used in original.
                 xor=bool_method(operator.xor, names('xor'), op('^')),
                 rand_=bool_method(lambda x, y: operator.and_(y, x),
                                   names('rand_'), op('&')),
                 ror_=bool_method(lambda x, y: operator.or_(y, x),
                                  names('ror_'), op('|')),
                 rxor=bool_method(lambda x, y: operator.xor(y, x),
                                  names('rxor'), op('^'))))

    # Re-key everything to the final (possibly dunder) names.
    new_methods = dict((names(k), v) for k, v in new_methods.items())
    return new_methods
def add_methods(cls, new_methods, force, select, exclude):
    """Bind the methods in ``new_methods`` onto ``cls``.

    Parameters
    ----------
    cls : type
        target class
    new_methods : dict
        mapping of method name -> function
    force : bool
        if True, bind even when ``cls`` already defines the name
    select : iterable of str, optional
        if given, bind only these names (mutually exclusive with ``exclude``)
    exclude : iterable of str, optional
        if given, skip these names (mutually exclusive with ``select``)

    Raises
    ------
    TypeError
        if both ``select`` and ``exclude`` are passed
    """
    if select and exclude:
        raise TypeError("May only pass either select or exclude")

    if select:
        # Keep only the explicitly selected methods.  The filtered dict must
        # replace ``new_methods`` -- previously the filtered result was built
        # but never used, so ``select`` silently had no effect.
        select = set(select)
        methods = {}
        for key, method in new_methods.items():
            if key in select:
                methods[key] = method
        new_methods = methods

    if exclude:
        for k in exclude:
            new_methods.pop(k, None)

    for name, method in new_methods.items():
        # without force, respect names already defined directly on the class
        if force or name not in cls.__dict__:
            bind_method(cls, name, method)
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None,
                                   comp_method=None, bool_method=None,
                                   use_numexpr=True, force=False, select=None,
                                   exclude=None):
    """
    Adds the full suite of special arithmetic methods (``__add__``,
    ``__sub__``, etc.) to the class.

    Parameters
    ----------
    arith_method : function (optional)
        factory for special arithmetic methods, with op string:
        f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
    radd_func : function (optional)
        Possible replacement for ``operator.add`` for compatibility
    comp_method : function, optional,
        factory for rich comparison - signature: f(op, name, str_rep)
    use_numexpr : bool, default True
        whether to accelerate with numexpr, defaults to True
    force : bool, default False
        if False, checks whether function is defined **on ``cls.__dict__``**
        before defining if True, always defines functions on class base
    select : iterable of strings (optional)
        if passed, only sets functions with names in select
    exclude : iterable of strings (optional)
        if passed, will not set functions with names in exclude
    """
    radd_func = radd_func or operator.add

    # in frame, special methods have default_axis = None, comp methods use
    # 'columns'
    new_methods = _create_methods(arith_method, radd_func, comp_method,
                                  bool_method, use_numexpr, default_axis=None,
                                  special=True)

    # inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed
    def _wrap_inplace_method(method):
        """
        return an inplace wrapper for this method
        """

        def f(self, other):
            result = method(self, other)

            # this makes sure that we are aligned like the input
            # we are updating inplace so we want to ignore is_copy
            self._update_inplace(result.reindex_like(self, copy=False)._data,
                                 verify_is_copy=False)

            return self

        return f

    # derive the in-place dunders (__iadd__ etc.) from the regular ones
    new_methods.update(
        dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]),
             __isub__=_wrap_inplace_method(new_methods["__sub__"]),
             __imul__=_wrap_inplace_method(new_methods["__mul__"]),
             __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
             __ipow__=_wrap_inplace_method(new_methods["__pow__"]), ))
    if not compat.PY3:
        # classic division only exists on Python 2
        new_methods["__idiv__"] = new_methods["__div__"]

    add_methods(cls, new_methods=new_methods, force=force, select=select,
                exclude=exclude)
def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None,
                                flex_comp_method=None, flex_bool_method=None,
                                use_numexpr=True, force=False, select=None,
                                exclude=None):
    """
    Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
    to the class.

    Parameters
    ----------
    flex_arith_method : function
        factory for special arithmetic methods, with op string:
        f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
    radd_func : function (optional)
        Possible replacement for ``lambda x, y: operator.add(y, x)`` for
        compatibility
    flex_comp_method : function, optional,
        factory for rich comparison - signature: f(op, name, str_rep)
    use_numexpr : bool, default True
        whether to accelerate with numexpr, defaults to True
    force : bool, default False
        if False, checks whether function is defined **on ``cls.__dict__``**
        before defining if True, always defines functions on class base
    select : iterable of strings (optional)
        if passed, only sets functions with names in select
    exclude : iterable of strings (optional)
        if passed, will not set functions with names in exclude
    """
    radd_func = radd_func or (lambda x, y: operator.add(y, x))

    # in frame, default axis is 'columns', doesn't matter for series and panel
    new_methods = _create_methods(flex_arith_method, radd_func,
                                  flex_comp_method, flex_bool_method,
                                  use_numexpr, default_axis='columns',
                                  special=False)
    # long-form aliases for the flex API
    new_methods.update(dict(multiply=new_methods['mul'],
                            subtract=new_methods['sub'],
                            divide=new_methods['div']))
    # opt out of bool flex methods for now
    for k in ('ror_', 'rxor', 'rand_'):
        if k in new_methods:
            new_methods.pop(k)

    add_methods(cls, new_methods=new_methods, force=force, select=select,
                exclude=exclude)
class _TimeOp(object):
    """
    Wrapper around Series datetime/time/timedelta arithmetic operations.
    Generally, you should use classmethod ``maybe_convert_for_time_op`` as an
    entry point.

    Only two user-visible string fixes relative to the previous revision:
    a missing space in the timedelta-mul/div error message (was rendered as
    "waspassed") and the "Incompatbile" typo in the tz mismatch error.
    """
    # fill for NaT slots when masking results back in
    fill_value = iNaT
    wrap_results = staticmethod(lambda x: x)
    dtype = None

    def __init__(self, left, right, name, na_op):

        # need to make sure that we are aligning the data
        if isinstance(left, ABCSeries) and isinstance(right, ABCSeries):
            left, right = left.align(right, copy=False)

        lvalues = self._convert_to_array(left, name=name)
        rvalues = self._convert_to_array(right, name=name, other=lvalues)

        self.name = name
        self.na_op = na_op

        # left
        self.left = left
        self.is_offset_lhs = self._is_offset(left)
        self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
        self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
        self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
        self.is_datetime_lhs = (self.is_datetime64_lhs or
                                self.is_datetime64tz_lhs)
        self.is_integer_lhs = left.dtype.kind in ['i', 'u']
        self.is_floating_lhs = left.dtype.kind == 'f'

        # right
        self.right = right
        self.is_offset_rhs = self._is_offset(right)
        self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
        self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
        self.is_datetime_rhs = (self.is_datetime64_rhs or
                                self.is_datetime64tz_rhs)
        self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
        self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
        self.is_floating_rhs = rvalues.dtype.kind == 'f'

        self._validate(lvalues, rvalues, name)
        self.lvalues, self.rvalues = self._convert_for_datetime(lvalues,
                                                                rvalues)

    def _validate(self, lvalues, rvalues, name):
        """Raise if the operator ``name`` is not defined for this
        combination of datetime/timedelta/offset/numeric operands."""
        # timedelta and integer mul/div
        if ((self.is_timedelta_lhs and
                (self.is_integer_rhs or self.is_floating_rhs)) or
            (self.is_timedelta_rhs and
                (self.is_integer_lhs or self.is_floating_lhs))):

            if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
                # BUGFIX: adjacent string literals previously concatenated to
                # "waspassed" -- a space was missing after "was".
                raise TypeError("can only operate on a timedelta and an "
                                "integer or a float for division and "
                                "multiplication, but the operator [%s] was "
                                "passed" % name)

        # 2 timedeltas
        elif ((self.is_timedelta_lhs and
               (self.is_timedelta_rhs or self.is_offset_rhs)) or
              (self.is_timedelta_rhs and
               (self.is_timedelta_lhs or self.is_offset_lhs))):

            if name not in ('__div__', '__rdiv__', '__truediv__',
                            '__rtruediv__', '__add__', '__radd__', '__sub__',
                            '__rsub__'):
                raise TypeError("can only operate on a timedeltas for "
                                "addition, subtraction, and division, but the"
                                " operator [%s] was passed" % name)

        # datetime and timedelta/DateOffset
        elif (self.is_datetime_lhs and
              (self.is_timedelta_rhs or self.is_offset_rhs)):

            if name not in ('__add__', '__radd__', '__sub__'):
                raise TypeError("can only operate on a datetime with a rhs of "
                                "a timedelta/DateOffset for addition and "
                                "subtraction, but the operator [%s] was "
                                "passed" % name)

        elif (self.is_datetime_rhs and
              (self.is_timedelta_lhs or self.is_offset_lhs)):
            if name not in ('__add__', '__radd__', '__rsub__'):
                raise TypeError("can only operate on a timedelta/DateOffset "
                                "with a rhs of a datetime for addition, "
                                "but the operator [%s] was passed" % name)

        # 2 datetimes
        elif self.is_datetime_lhs and self.is_datetime_rhs:

            if name not in ('__sub__', '__rsub__'):
                raise TypeError("can only operate on a datetimes for"
                                " subtraction, but the operator [%s] was"
                                " passed" % name)

            # if tz's must be equal (same or None)
            if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
                # BUGFIX: fixed "Incompatbile" typo in this error message.
                raise ValueError("Incompatible tz's on datetime subtraction "
                                 "ops")

        elif ((self.is_timedelta_lhs or self.is_offset_lhs) and
              self.is_datetime_rhs):

            if name not in ('__add__', '__radd__'):
                raise TypeError("can only operate on a timedelta/DateOffset "
                                "and a datetime for addition, but the "
                                "operator [%s] was passed" % name)
        else:
            raise TypeError('cannot operate on a series without a rhs '
                            'of a series/ndarray of type datetime64[ns] '
                            'or a timedelta')

    def _convert_to_array(self, values, name=None, other=None):
        """converts values to ndarray"""
        from pandas.tseries.timedeltas import to_timedelta

        ovalues = values
        supplied_dtype = None
        if not is_list_like(values):
            values = np.array([values])
        # if this is a Series that contains relevant dtype info, then use this
        # instead of the inferred type; this avoids coercing Series([NaT],
        # dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
        elif (isinstance(values, pd.Series) and
              (is_timedelta64_dtype(values) or is_datetime64_dtype(values))):
            supplied_dtype = values.dtype
        inferred_type = supplied_dtype or lib.infer_dtype(values)
        if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or
                com.is_datetimetz(inferred_type)):
            # if we have a other of timedelta, but use pd.NaT here we
            # we are in the wrong path
            if (supplied_dtype is None and other is not None and
                (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and
                    isnull(values).all()):
                values = np.empty(values.shape, dtype='timedelta64[ns]')
                values[:] = iNaT

            # a datelike
            elif isinstance(values, pd.DatetimeIndex):
                values = values.to_series()
            # datetime with tz
            elif (isinstance(ovalues, datetime.datetime) and
                  hasattr(ovalues, 'tz')):
                values = pd.DatetimeIndex(values)
            # datetime array with tz
            elif com.is_datetimetz(values):
                if isinstance(values, ABCSeries):
                    values = values._values
            elif not (isinstance(values, (np.ndarray, ABCSeries)) and
                      is_datetime64_dtype(values)):
                values = tslib.array_to_datetime(values)
        elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to to ns here
            values = to_timedelta(values, errors='coerce')
        elif inferred_type == 'integer':
            # py3 compat where dtype is 'm' but is an integer
            if values.dtype.kind == 'm':
                values = values.astype('timedelta64[ns]')
            elif isinstance(values, pd.PeriodIndex):
                values = values.to_timestamp().to_series()
            elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
                raise TypeError("incompatible type for a datetime/timedelta "
                                "operation [{0}]".format(name))
        elif inferred_type == 'floating':
            if (isnull(values).all() and
                    name in ('__add__', '__radd__', '__sub__', '__rsub__')):
                values = np.empty(values.shape, dtype=other.dtype)
                values[:] = iNaT
            return values
        elif self._is_offset(values):
            return values
        else:
            raise TypeError("incompatible type [{0}] for a datetime/timedelta"
                            " operation".format(np.array(values).dtype))

        return values

    def _convert_for_datetime(self, lvalues, rvalues):
        """Coerce operands to i8/object arrays for the underlying op and
        set self.dtype / self.fill_value / self.wrap_results accordingly."""
        from pandas.tseries.timedeltas import to_timedelta

        mask = isnull(lvalues) | isnull(rvalues)

        # datetimes require views
        if self.is_datetime_lhs or self.is_datetime_rhs:

            # datetime subtraction means timedelta
            if self.is_datetime_lhs and self.is_datetime_rhs:
                if self.name in ('__sub__', '__rsub__'):
                    self.dtype = 'timedelta64[ns]'
                else:
                    self.dtype = 'datetime64[ns]'
            elif self.is_datetime64tz_lhs:
                self.dtype = lvalues.dtype
            elif self.is_datetime64tz_rhs:
                self.dtype = rvalues.dtype
            else:
                self.dtype = 'datetime64[ns]'

            # if adding single offset try vectorized path
            # in DatetimeIndex; otherwise elementwise apply
            def _offset(lvalues, rvalues):
                if len(lvalues) == 1:
                    rvalues = pd.DatetimeIndex(rvalues)
                    lvalues = lvalues[0]
                else:
                    warnings.warn("Adding/subtracting array of DateOffsets to "
                                  "Series not vectorized", PerformanceWarning)
                    rvalues = rvalues.astype('O')

                # pass thru on the na_op
                self.na_op = lambda x, y: getattr(x, self.name)(y)
                return lvalues, rvalues

            if self.is_offset_lhs:
                lvalues, rvalues = _offset(lvalues, rvalues)
            elif self.is_offset_rhs:
                rvalues, lvalues = _offset(rvalues, lvalues)
            else:

                # with tz, convert to UTC
                if self.is_datetime64tz_lhs:
                    lvalues = lvalues.tz_localize(None)
                if self.is_datetime64tz_rhs:
                    rvalues = rvalues.tz_localize(None)

                lvalues = lvalues.view(np.int64)
                rvalues = rvalues.view(np.int64)

        # otherwise it's a timedelta
        else:

            self.dtype = 'timedelta64[ns]'

            # convert Tick DateOffset to underlying delta
            if self.is_offset_lhs:
                lvalues = to_timedelta(lvalues)
            if self.is_offset_rhs:
                rvalues = to_timedelta(rvalues)

            lvalues = lvalues.astype(np.int64)
            if not self.is_floating_rhs:
                rvalues = rvalues.astype(np.int64)

            # time delta division -> unit less
            # integer gets converted to timedelta in np < 1.6
            if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and
                    not self.is_integer_rhs and not self.is_integer_lhs and
                    self.name in ('__div__', '__truediv__')):
                self.dtype = 'float64'
                self.fill_value = np.nan
                lvalues = lvalues.astype(np.float64)
                rvalues = rvalues.astype(np.float64)

        # if we need to mask the results
        if mask.any():

            def f(x):

                # datetime64[ns]/timedelta64[ns] masking
                try:
                    x = np.array(x, dtype=self.dtype)
                except TypeError:
                    x = np.array(x, dtype='datetime64[ns]')

                np.putmask(x, mask, self.fill_value)
                return x

            self.wrap_results = f

        return lvalues, rvalues

    def _is_offset(self, arr_or_obj):
        """ check if obj or all elements of list-like is DateOffset """
        if isinstance(arr_or_obj, pd.DateOffset):
            return True
        elif is_list_like(arr_or_obj):
            return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
        else:
            return False

    @classmethod
    def maybe_convert_for_time_op(cls, left, right, name, na_op):
        """
        if ``left`` and ``right`` are appropriate for datetime arithmetic with
        operation ``name``, processes them and returns a ``_TimeOp`` object
        that stores all the required values.  Otherwise, it will generate
        either a ``NotImplementedError`` or ``None``, indicating that the
        operation is unsupported for datetimes (e.g., an unsupported r_op) or
        that the data is not the right type for time ops.
        """
        # decide if we can do it
        is_timedelta_lhs = is_timedelta64_dtype(left)
        is_datetime_lhs = (is_datetime64_dtype(left) or
                           is_datetime64tz_dtype(left))

        if not (is_datetime_lhs or is_timedelta_lhs):
            return None

        return cls(left, right, name, na_op)
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
                         **eval_kwargs):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """

    def na_op(x, y):
        # fast path via numexpr; on TypeError fall back to a masked
        # element-wise application that skips null slots
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
                dtype = np.find_common_type([x.dtype, y.dtype], [])
                result = np.empty(x.size, dtype=dtype)
                mask = notnull(x) & notnull(y)
                result[mask] = op(x[mask], _values_from_object(y[mask]))
            elif isinstance(x, np.ndarray):
                result = np.empty(len(x), dtype=x.dtype)
                mask = notnull(x)
                result[mask] = op(x[mask], y)
            else:
                raise TypeError("{typ} cannot perform the operation "
                                "{op}".format(typ=type(x).__name__,
                                              op=str_rep))

            result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)

        # e.g. map 1/0 -> fill_zeros (inf for div) instead of raising
        result = missing.fill_zeros(result, x, y, name, fill_zeros)
        return result

    def wrapper(left, right, name=name, na_op=na_op):

        if isinstance(right, pd.DataFrame):
            return NotImplemented

        # detect datetime/timedelta operands and pre-convert them
        time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name,
                                                           na_op)

        if time_converted is None:
            lvalues, rvalues = left, right
            dtype = None
            wrap_results = lambda x: x
        elif time_converted is NotImplemented:
            return NotImplemented
        else:
            left, right = time_converted.left, time_converted.right
            lvalues, rvalues = time_converted.lvalues, time_converted.rvalues
            dtype = time_converted.dtype
            wrap_results = time_converted.wrap_results
            na_op = time_converted.na_op

        if isinstance(rvalues, ABCSeries):
            rindex = getattr(rvalues, 'index', rvalues)
            name = _maybe_match_name(left, rvalues)
            lvalues = getattr(lvalues, 'values', lvalues)
            rvalues = getattr(rvalues, 'values', rvalues)
            # align on the union of both indexes before operating
            if left.index.equals(rindex):
                index = left.index
            else:
                index, lidx, ridx = left.index.join(rindex, how='outer',
                                                    return_indexers=True)

                if lidx is not None:
                    lvalues = algos.take_1d(lvalues, lidx)

                if ridx is not None:
                    rvalues = algos.take_1d(rvalues, ridx)

            arr = na_op(lvalues, rvalues)

            return left._constructor(wrap_results(arr), index=index,
                                     name=name, dtype=dtype)
        else:
            # scalars
            if (hasattr(lvalues, 'values') and
                    not isinstance(lvalues, pd.DatetimeIndex)):
                lvalues = lvalues.values

            return left._constructor(wrap_results(na_op(lvalues, rvalues)),
                                     index=left.index, name=left.name,
                                     dtype=dtype)

    return wrapper
def _comp_method_SERIES(op, name, str_rep, masker=False):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    ``masker`` is the value written into null slots of the result after an
    i8 comparison (True for ``ne`` so NaN != x holds, False otherwise).
    """

    def na_op(x, y):

        # dispatch to the categorical if we have a categorical
        # in either operand
        if is_categorical_dtype(x):
            return op(x, y)
        elif is_categorical_dtype(y) and not isscalar(y):
            return op(y, x)

        if is_object_dtype(x.dtype):
            if isinstance(y, list):
                y = lib.list_to_object_array(y)

            if isinstance(y, (np.ndarray, ABCSeries)):
                if not is_object_dtype(y.dtype):
                    result = lib.vec_compare(x, y.astype(np.object_), op)
                else:
                    result = lib.vec_compare(x, y, op)
            else:
                result = lib.scalar_compare(x, y, op)
        else:

            # we want to compare like types
            # we only want to convert to integer like if
            # we are not NotImplemented, otherwise
            # we would allow datetime64 (but viewed as i8) against
            # integer comparisons
            if is_datetimelike_v_numeric(x, y):
                raise TypeError("invalid type comparison")

            # numpy does not like comparisons vs None
            if isscalar(y) and isnull(y):
                if name == '__ne__':
                    return np.ones(len(x), dtype=bool)
                else:
                    return np.zeros(len(x), dtype=bool)

            # we have a datetime/timedelta and may need to convert
            mask = None
            if (needs_i8_conversion(x) or
                    (not isscalar(y) and needs_i8_conversion(y))):

                if isscalar(y):
                    y = _index.convert_scalar(x, _values_from_object(y))
                else:
                    y = y.view('i8')

                mask = isnull(x)
                x = x.view('i8')

            try:
                result = getattr(x, name)(y)
                if result is NotImplemented:
                    raise TypeError("invalid type comparison")
            except AttributeError:
                result = op(x, y)

            # null slots get the masker value (see docstring)
            if mask is not None and mask.any():
                result[mask] = masker

        return result

    def wrapper(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            self._get_axis_number(axis)

        if isinstance(other, ABCSeries):
            name = _maybe_match_name(self, other)
            if len(self) != len(other):
                raise ValueError('Series lengths must match to compare')
            return self._constructor(na_op(self.values, other.values),
                                     index=self.index, name=name)
        elif isinstance(other, pd.DataFrame):  # pragma: no cover
            return NotImplemented
        elif isinstance(other, (np.ndarray, pd.Index)):
            if len(self) != len(other):
                raise ValueError('Lengths must match to compare')
            return self._constructor(na_op(self.values, np.asarray(other)),
                                     index=self.index).__finalize__(self)
        elif isinstance(other, pd.Categorical):
            if not is_categorical_dtype(self):
                msg = ("Cannot compare a Categorical for op {op} with Series "
                       "of dtype {typ}.\nIf you want to compare values, use "
                       "'series <op> np.asarray(other)'.")
                raise TypeError(msg.format(op=op, typ=self.dtype))

        if is_categorical_dtype(self):
            # cats are a special case as get_values() would return an ndarray,
            # which would then not take categories ordering into account
            # we can go directly to op, as the na_op would just test again and
            # dispatch to it.
            res = op(self.values, other)
        else:
            values = self.get_values()
            if isinstance(other, (list, np.ndarray)):
                other = np.asarray(other)

            res = na_op(values, other)
            if isscalar(res):
                raise TypeError('Could not compare %s type with Series' %
                                type(other))

            # always return a full value series here
            res = _values_from_object(res)

        res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
        return res

    return wrapper
def _bool_method_SERIES(op, name, str_rep):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Builds the logical (&, |, ^) dunder method for Series: `na_op`
    evaluates the raw op with an object/scalar fallback, and `wrapper`
    handles alignment and NA-filling of the operands.
    """
    def na_op(x, y):
        try:
            result = op(x, y)
        except TypeError:
            if isinstance(y, list):
                y = lib.list_to_object_array(y)
            if isinstance(y, (np.ndarray, ABCSeries)):
                if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
                    result = op(x, y)  # when would this be hit?
                else:
                    # fall back to an element-wise loop over object arrays
                    x = com._ensure_object(x)
                    y = com._ensure_object(y)
                    result = lib.vec_binop(x, y, op)
            else:
                try:
                    # let null fall thru
                    if not isnull(y):
                        y = bool(y)
                    result = lib.scalar_binop(x, y, op)
                except:
                    raise TypeError("cannot compare a dtyped [{0}] array with "
                                    "a scalar of type [{1}]".format(
                                        x.dtype, type(y).__name__))
        return result
    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)
        # NA fill strategy: integer operands fill with 0, anything else
        # is coerced to bool and filled with False
        fill_int = lambda x: x.fillna(0)
        fill_bool = lambda x: x.fillna(False).astype(bool)
        if isinstance(other, ABCSeries):
            name = _maybe_match_name(self, other)
            other = other.reindex_like(self)
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)
            filler = (fill_int if is_self_int_dtype and is_other_int_dtype
                      else fill_bool)
            return filler(self._constructor(na_op(self.values, other.values),
                                            index=self.index, name=name))
        elif isinstance(other, pd.DataFrame):
            return NotImplemented
        else:
            # scalars, list, tuple, np.array
            filler = (fill_int if is_self_int_dtype and
                      is_integer_dtype(np.asarray(other)) else fill_bool)
            return filler(self._constructor(
                na_op(self.values, other),
                index=self.index)).__finalize__(self)
    return wrapper
def _radd_compat(left, right):
    """Return ``right + left`` (reflected addition).

    Historically this was wrapped in a try/except as a workaround for a
    NumPy 1.5.1 bug (GH #353), but the handler only re-raised the
    TypeError, so it was dead code and has been removed.
    """
    return right + left
# Metadata for the flexible arithmetic methods: for each op, the operator
# symbol, a human-readable description, whether it is the reflected
# variant, and the name of its reflected counterpart.
_op_descriptions = {}
for _name, _sym, _desc, _rev in (('add', '+', 'Addition', 'radd'),
                                 ('sub', '-', 'Subtraction', 'rsub'),
                                 ('mul', '*', 'Multiplication', 'rmul'),
                                 ('mod', '%', 'Modulo', 'rmod'),
                                 ('pow', '**', 'Exponential power', 'rpow'),
                                 ('truediv', '/', 'Floating division',
                                  'rtruediv'),
                                 ('floordiv', '//', 'Integer division',
                                  'rfloordiv')):
    _op_descriptions[_name] = {'op': _sym, 'desc': _desc,
                               'reversed': False, 'reverse': _rev}
# The forward op names only (reflected entries are derived below).
_op_names = list(_op_descriptions.keys())
for k in _op_names:
    reverse_op = _op_descriptions[k]['reverse']
    entry = dict(_op_descriptions[k])
    entry['reversed'] = True
    entry['reverse'] = k
    _op_descriptions[reverse_op] = entry
def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None,
                        **eval_kwargs):
    # Factory for the named ("flex") Series arithmetic methods such as
    # Series.add / Series.radd; the docstring is generated from
    # _op_descriptions.
    op_name = name.replace('__', '')
    op_desc = _op_descriptions[op_name]
    if op_desc['reversed']:
        equiv = 'other ' + op_desc['op'] + ' series'
    else:
        equiv = 'series ' + op_desc['op'] + ' other'
    doc = """
    %s of series and other, element-wise (binary operator `%s`).
    Equivalent to ``%s``, but with support to substitute a fill_value for
    missing data in one of the inputs.
    Parameters
    ----------
    other: Series or scalar value
    fill_value : None or float value, default None (NaN)
        Fill missing (NaN) values with this value. If both Series are
        missing, the result will be missing
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level
    Returns
    -------
    result : Series
    See also
    --------
    Series.%s
    """ % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
    @Appender(doc)
    def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
        # validate axis
        self._get_axis_number(axis)
        if isinstance(other, ABCSeries):
            return self._binop(other, op, level=level, fill_value=fill_value)
        elif isinstance(other, (np.ndarray, ABCSeries, list, tuple)):
            if len(other) != len(self):
                raise ValueError('Lengths must be equal')
            # wrap array-likes in a Series aligned to self's index
            return self._binop(self._constructor(other, self.index), op,
                               level=level, fill_value=fill_value)
        else:
            # scalar other: fill NAs first, then apply the raw op
            if fill_value is not None:
                self = self.fillna(fill_value)
            return self._constructor(op(self.values, other),
                                     self.index).__finalize__(self)
    flex_wrapper.__name__ = name
    return flex_wrapper
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
radd_func=_radd_compat,
flex_comp_method=_comp_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
radd_func=_radd_compat,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
                        fill_zeros=None, **eval_kwargs):
    # Factory for DataFrame arithmetic methods.  na_op evaluates the op
    # (via the expressions/numexpr layer when possible) and falls back to
    # a masked element-wise application over non-null values on TypeError.
    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            xrav = x.ravel()
            if isinstance(y, (np.ndarray, ABCSeries)):
                dtype = np.find_common_type([x.dtype, y.dtype], [])
                result = np.empty(x.size, dtype=dtype)
                yrav = y.ravel()
                # only operate where both sides are non-null
                mask = notnull(xrav) & notnull(yrav)
                xrav = xrav[mask]
                yrav = yrav[mask]
                if np.prod(xrav.shape) and np.prod(yrav.shape):
                    result[mask] = op(xrav, yrav)
            elif hasattr(x, 'size'):
                result = np.empty(x.size, dtype=x.dtype)
                mask = notnull(xrav)
                xrav = xrav[mask]
                if np.prod(xrav.shape):
                    result[mask] = op(xrav, y)
            else:
                raise TypeError("cannot perform operation {op} between "
                                "objects of type {x} and {y}".format(
                                    op=name, x=type(x), y=type(y)))
            # masked-out positions become NaN, upcasting the dtype if needed
            result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
            result = result.reshape(x.shape)
        result = missing.fill_zeros(result, x, y, name, fill_zeros)
        return result
    if name in _op_descriptions:
        # generate a rich docstring from the op metadata
        op_name = name.replace('__', '')
        op_desc = _op_descriptions[op_name]
        if op_desc['reversed']:
            equiv = 'other ' + op_desc['op'] + ' dataframe'
        else:
            equiv = 'dataframe ' + op_desc['op'] + ' other'
        doc = """
    %s of dataframe and other, element-wise (binary operator `%s`).
    Equivalent to ``%s``, but with support to substitute a fill_value for
    missing data in one of the inputs.
    Parameters
    ----------
    other : Series, DataFrame, or constant
    axis : {0, 1, 'index', 'columns'}
        For Series input, axis to match Series index on
    fill_value : None or float value, default None
        Fill missing (NaN) values with this value. If both DataFrame
        locations are missing, the result will be missing
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level
    Notes
    -----
    Mismatched indices will be unioned together
    Returns
    -------
    result : DataFrame
    See also
    --------
    DataFrame.%s
    """ % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
    else:
        doc = _arith_doc_FRAME % name
    @Appender(doc)
    def f(self, other, axis=default_axis, level=None, fill_value=None):
        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._combine_frame(other, na_op, fill_value, level)
        elif isinstance(other, ABCSeries):
            return self._combine_series(other, na_op, fill_value, axis, level)
        elif isinstance(other, (list, tuple)):
            # align a plain sequence along the requested axis
            if axis is not None and self._get_axis_name(axis) == 'index':
                # TODO: Get all of these to use _constructor_sliced
                # casted = self._constructor_sliced(other, index=self.index)
                casted = pd.Series(other, index=self.index)
            else:
                # casted = self._constructor_sliced(other, index=self.columns)
                casted = pd.Series(other, index=self.columns)
            return self._combine_series(casted, na_op, fill_value, axis, level)
        elif isinstance(other, np.ndarray) and other.ndim:  # skips np scalar
            if other.ndim == 1:
                if axis is not None and self._get_axis_name(axis) == 'index':
                    # casted = self._constructor_sliced(other,
                    #                                   index=self.index)
                    casted = pd.Series(other, index=self.index)
                else:
                    # casted = self._constructor_sliced(other,
                    #                                   index=self.columns)
                    casted = pd.Series(other, index=self.columns)
                return self._combine_series(casted, na_op, fill_value, axis,
                                            level)
            elif other.ndim == 2:
                # casted = self._constructor(other, index=self.index,
                #                            columns=self.columns)
                casted = pd.DataFrame(other, index=self.index,
                                      columns=self.columns)
                return self._combine_frame(casted, na_op, fill_value, level)
            else:
                raise ValueError("Incompatible argument shape: %s" %
                                 (other.shape, ))
        else:
            # scalar other
            if fill_value is not None:
                self = self.fillna(fill_value)
            return self._combine_const(other, na_op)
    f.__name__ = name
    return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
                            masker=False):
    # Factory for the named ("flex") DataFrame comparison methods
    # (eq/ne/lt/...).  na_op applies the comparison element-wise over
    # non-null values when the vectorized path raises TypeError.
    def na_op(x, y):
        try:
            result = op(x, y)
        except TypeError:
            xrav = x.ravel()
            result = np.empty(x.size, dtype=x.dtype)
            if isinstance(y, (np.ndarray, ABCSeries)):
                yrav = y.ravel()
                mask = notnull(xrav) & notnull(yrav)
                result[mask] = op(np.array(list(xrav[mask])),
                                  np.array(list(yrav[mask])))
            else:
                mask = notnull(xrav)
                result[mask] = op(np.array(list(xrav[mask])), y)
            # NA positions compare False, except for != where NA != y
            if op == operator.ne:  # pragma: no cover
                np.putmask(result, ~mask, True)
            else:
                np.putmask(result, ~mask, False)
            result = result.reshape(x.shape)
        return result
    @Appender('Wrapper for flexible comparison methods %s' % name)
    def f(self, other, axis=default_axis, level=None):
        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._flex_compare_frame(other, na_op, str_rep, level)
        elif isinstance(other, ABCSeries):
            return self._combine_series(other, na_op, None, axis, level)
        elif isinstance(other, (list, tuple)):
            # align a plain sequence along the requested axis
            if axis is not None and self._get_axis_name(axis) == 'index':
                casted = pd.Series(other, index=self.index)
            else:
                casted = pd.Series(other, index=self.columns)
            return self._combine_series(casted, na_op, None, axis, level)
        elif isinstance(other, np.ndarray):
            if other.ndim == 1:
                if axis is not None and self._get_axis_name(axis) == 'index':
                    casted = pd.Series(other, index=self.index)
                else:
                    casted = pd.Series(other, index=self.columns)
                return self._combine_series(casted, na_op, None, axis, level)
            elif other.ndim == 2:
                casted = pd.DataFrame(other, index=self.index,
                                      columns=self.columns)
                return self._flex_compare_frame(casted, na_op, str_rep, level)
            else:
                raise ValueError("Incompatible argument shape: %s" %
                                 (other.shape, ))
        else:
            return self._combine_const(other, na_op)
    f.__name__ = name
    return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
    # Factory for the DataFrame dunder comparison methods (__eq__, __lt__,
    # ...); dispatches on the type of `other`.
    @Appender('Wrapper for comparison method %s' % name)
    def f(self, other):
        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._compare_frame(other, func, str_rep)
        elif isinstance(other, ABCSeries):
            return self._combine_series_infer(other, func)
        else:
            # straight boolean comparisions we want to allow all columns
            # (regardless of dtype to pass thru) See #4537 for discussion.
            res = self._combine_const(other, func, raise_on_error=False)
            return res.fillna(True).astype(bool)
    f.__name__ = name
    return f
# Wire up the DataFrame method factories.  Note that bool_method reuses
# _arith_method_FRAME: frame &/|/^ go through the arithmetic machinery.
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
                        radd_func=_radd_compat,
                        flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
                           radd_func=_radd_compat,
                           comp_method=_comp_method_FRAME,
                           bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
                        default_axis=None, **eval_kwargs):
    # Factory for Panel arithmetic methods; these only accept scalar
    # `other` (see f below).
    # copied from Series na_op above, but without unnecessary branch for
    # non-scalar
    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            # TODO: might need to find_common_type here?
            result = np.empty(len(x), dtype=x.dtype)
            mask = notnull(x)
            result[mask] = op(x[mask], y)
            # masked-out (null) positions become NaN, upcasting if needed
            result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
        result = missing.fill_zeros(result, x, y, name, fill_zeros)
        return result
    # work only for scalars
    def f(self, other):
        if not isscalar(other):
            raise ValueError('Simple arithmetic with %s can only be '
                             'done with scalar values' %
                             self._constructor.__name__)
        return self._combine(other, op)
    f.__name__ = name
    return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
    # Factory for Panel comparison methods.  na_op falls back to a masked
    # element-wise comparison over non-null values on TypeError.
    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True)
        except TypeError:
            xrav = x.ravel()
            result = np.empty(x.size, dtype=bool)
            if isinstance(y, np.ndarray):
                yrav = y.ravel()
                mask = notnull(xrav) & notnull(yrav)
                result[mask] = op(np.array(list(xrav[mask])),
                                  np.array(list(yrav[mask])))
            else:
                mask = notnull(xrav)
                result[mask] = op(np.array(list(xrav[mask])), y)
            # NA positions compare False, except for != where NA != y
            if op == operator.ne:  # pragma: no cover
                np.putmask(result, ~mask, True)
            else:
                np.putmask(result, ~mask, False)
            result = result.reshape(x.shape)
        return result
    @Appender('Wrapper for comparison method %s' % name)
    def f(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            axis = self._get_axis_number(axis)
        if isinstance(other, self._constructor):
            return self._compare_constructor(other, na_op)
        elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
                                ABCSeries)):
            # lower-dimensional pandas objects would need alignment logic
            # that Panel does not implement
            raise Exception("input needs alignment for this object [%s]" %
                            self._constructor)
        else:
            return self._combine_const(other, na_op)
    f.__name__ = name
    return f
# Wire up the Panel method factories; panels reuse the arithmetic factory
# for boolean ops as well.
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
                           comp_method=_comp_method_PANEL,
                           bool_method=_arith_method_PANEL)
| |
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"
import re
import io
from PIL import Image, ImageFile, _binary
#
# --------------------------------------------------------------------
# 32-bit little-endian integer read/write helpers (DOS EPS binary header)
i32 = _binary.i32le
o32 = _binary.o32le
# DSC comment of the form "%%Key: value"
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
# valueless DSC comment, e.g. "%%EndComments" or "%!PS-Adobe-..."
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
import sys
if sys.platform.startswith('win'):
    import shutil
    if hasattr(shutil, 'which'):
        which = shutil.which
    else:
        # Python < 3.3
        import distutils.spawn
        which = distutils.spawn.find_executable
    # Probe the PATH for a Ghostscript executable.  After this block the
    # sentinel is: None = not Windows, False = searched but not found,
    # str = name of the binary to invoke.
    for binary in ('gswin32c', 'gswin64c', 'gs'):
        if which(binary) is not None:
            gs_windows_binary = binary
            break
    else:
        gs_windows_binary = False
def Ghostscript(tile, size, fp, scale=1):
    """Render an EPS image to a bitmap by piping it through Ghostscript.

    Parameters
    ----------
    tile : list
        Single-entry tile list; tile[0] is (decoder, tile, offset, data)
        where offset is the file position of the PostScript code and
        data is a (length, bbox) pair.
    size : tuple
        Output (width, height) in pixels at scale 1.
    fp : file object
        Source file; seek()ed to `offset` before streaming.
    scale : int
        Integer supersampling factor for hi-res rendering.

    Raises IOError if Ghostscript exits with a non-zero status, or
    WindowsError when no Ghostscript binary was found on Windows.
    """
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data
    # Hack to support hi-res rendering
    scale = int(scale) or 1
    size = (size[0] * scale, size[1] * scale)
    bbox = [bbox[0], bbox[1], bbox[2] * scale, bbox[3] * scale]
    import os
    import subprocess
    import tempfile
    # mkstemp creates the file atomically with a private mode, unlike the
    # insecure (race-prone) mktemp; we only need the path, so close our fd
    # and let Ghostscript write to it.
    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)
    # Build the Ghostscript command.  Each switch must be a separate argv
    # element: subprocess does no shell word-splitting, so a combined
    # "-dNOPAUSE -dSAFER" string would reach gs as one unrecognized
    # argument.
    command = ["gs",
               "-q",                       # quiet mode
               "-g%dx%d" % size,           # set output geometry (pixels)
               "-r%d" % (72 * scale),      # set input DPI (dots per inch)
               "-dNOPAUSE",                # don't pause between pages
               "-dSAFER",                  # safe mode
               "-sDEVICE=ppmraw",          # ppm driver
               "-sOutputFile=%s" % outfile,
               ]
    if gs_windows_binary is not None:
        if gs_windows_binary is False:
            raise WindowsError('Unable to locate Ghostscript on paths')
        command[0] = gs_windows_binary
    # push data through ghostscript
    try:
        gs = subprocess.Popen(command, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE)
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            gs.stdin.write(("%d %d translate\n"
                            % (-bbox[0], -bbox[1])).encode('ascii'))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            gs.stdin.write(s)
        gs.stdin.close()
        status = gs.wait()
        if status:
            raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(outfile)
    finally:
        try:
            os.unlink(outfile)
        except OSError:
            # best-effort cleanup of the temp file
            pass
    return im
class PSFile:
    """Wrapper that treats either CR or LF as end of line."""
    def __init__(self, fp):
        self.fp = fp
        self.char = None  # one-byte pushback used for CR/LF handling
    def __getattr__(self, id):
        # Delegate unknown attributes to the wrapped file object, caching
        # them on this instance so later lookups skip __getattr__.
        v = getattr(self.fp, id)
        setattr(self, id, v)
        return v
    def seek(self, offset, whence=0):
        # Any pending pushback byte is invalidated by repositioning.
        self.char = None
        self.fp.seek(offset, whence)
    def read(self, count):
        return self.fp.read(count).decode('latin-1')
    def tell(self):
        pos = self.fp.tell()
        if self.char:
            pos = pos - 1  # account for the pushback byte
        return pos
    def readline(self):
        # Accumulate bytes until CR, LF or EOF; lines are always returned
        # with a normalized trailing "\n".
        chunks = []
        if self.char:
            ch = self.char
            self.char = None
        else:
            ch = self.fp.read(1)
        while ch not in b"\r\n":
            chunks.append(ch)
            ch = self.fp.read(1)
        if ch == b"\r":
            # Peek one byte: CR LF counts as a single line ending, while a
            # lone CR pushes the peeked byte back for the next call.
            self.char = self.fp.read(1)
            if self.char == b"\n":
                self.char = None
        return b"".join(chunks).decode('latin-1') + "\n"
def _accept(prefix):
    """Return True if *prefix* looks like an EPS file: either the "%!PS"
    DSC signature or the 4-byte little-endian magic of a DOS EPS binary
    header.  Guards against prefixes shorter than 4 bytes, which would
    otherwise make i32() raise instead of returning False."""
    return (prefix[:4] == b"%!PS" or
            (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5))
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""
    format = "EPS"
    format_description = "Encapsulated Postscript"
    def _open(self):
        # Parse the EPS header: locate the PostScript section, collect the
        # DSC comments into self.info, and determine size/tile from the
        # BoundingBox (plus the optional Photoshop-style %ImageData hint).
        # FIXME: should check the first 512 bytes to see if this
        # really is necessary (platform-dependent, though...)
        fp = PSFile(self.fp)
        # HEAD
        s = fp.read(512)
        if s[:4] == "%!PS":
            # plain PostScript: the data runs to the end of the file
            offset = 0
            fp.seek(0, 2)
            length = fp.tell()
        elif i32(s) == 0xC6D3D0C5:
            # DOS EPS binary header: words 2 and 3 give the offset and
            # length of the embedded PostScript section.
            # NOTE(review): s is a latin-1 str here (PSFile.read decodes),
            # so _binary.i32le must accept str input for this branch to
            # work on Python 3 -- verify.
            offset = i32(s[4:])
            length = i32(s[8:])
            fp.seek(offset)
        else:
            raise SyntaxError("not an EPS file")
        fp.seek(offset)
        box = None
        self.mode = "RGB"
        self.size = 1, 1  # FIXME: huh?
        #
        # Load EPS header
        s = fp.readline()
        while s:
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            # strip the line ending (PSFile normalizes endings to "\n")
            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]
            try:
                m = split.match(s)
            except re.error as v:
                raise SyntaxError("not an EPS file")
            if m:
                # "%%Key: value" style DSC comment
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        # NOTE(review): the comprehension variable shadows
                        # the outer `s` on Python 2 (where comprehension
                        # scope leaks) -- verify against supported versions.
                        box = [int(float(s)) for s in v.split()]
                        self.size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0,0) + self.size, offset,
                                      (length, box))]
                    except:
                        pass
            else:
                m = field.match(s)
                if m:
                    # valueless DSC comment, e.g. "%%EndComments"
                    k = m.group(1)
                    if k == "EndComments":
                        break
                    if k[:8] == "PS-Adobe":
                        self.info[k[:8]] = k[9:]
                    else:
                        self.info[k] = ""
                elif s[0:1] == '%':
                    # handle non-DSC Postscript comments that some
                    # tools mistakenly put in the Comments section
                    pass
                else:
                    raise IOError("bad EPS header")
            s = fp.readline()
            if s[:1] != "%":
                break
        #
        # Scan for an "ImageData" descriptor
        while s[0] == "%":
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]
            if s[:11] == "%ImageData:":
                # "%ImageData: x y bits mode z3 z4 encoding identifier"
                [x, y, bi, mo, z3, z4, en, id] =\
                    s[11:].split(None, 7)
                x = int(x); y = int(y)
                bi = int(bi)
                mo = int(mo)
                en = int(en)
                # only binary (1) and hex (2) encodings are supported
                if en == 1:
                    decoder = "eps_binary"
                elif en == 2:
                    decoder = "eps_hex"
                else:
                    break
                if bi != 8:
                    break
                if mo == 1:
                    self.mode = "L"
                elif mo == 2:
                    self.mode = "LAB"
                elif mo == 3:
                    self.mode = "RGB"
                else:
                    break
                # strip surrounding quotes from the identifier
                if id[:1] == id[-1:] == '"':
                    id = id[1:-1]
                # Scan forward to the actual image data
                while True:
                    s = fp.readline()
                    if not s:
                        break
                    if s[:len(id)] == id:
                        self.size = x, y
                        self.tile2 = [(decoder,
                                       (0, 0, x, y),
                                       fp.tell(),
                                       0)]
                        return
            s = fp.readline()
            if not s:
                break
        if not box:
            raise IOError("cannot determine EPS bounding box")
    def load(self, scale=1):
        # Load EPS via Ghostscript; afterwards the rendered bitmap's mode
        # and size replace the header-derived values.
        if not self.tile:
            return
        self.im = Ghostscript(self.tile, self.size, self.fp, scale)
        self.mode = self.im.mode
        self.size = self.im.size
        self.tile = []
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library.

    Writes an ASCII PostScript header followed by hex-encoded pixel data
    (produced by the "eps" encoder via ImageFile._save).  Set eps=0 to
    omit the EPS DSC header block.
    """
    #
    # make sure image data is available
    im.load()
    #
    # determine postscript image mode
    if im.mode == "L":
        operator = (8, 1, "image")
    elif im.mode == "RGB":
        operator = (8, 3, "false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, "false 4 colorimage")
    else:
        raise ValueError("image mode is not supported")
    class NoCloseStream:
        # Shields the caller's file object from the TextIOWrapper below,
        # which would otherwise close the underlying stream.
        def __init__(self, fp):
            self.fp = fp
        def __getattr__(self, name):
            return getattr(self.fp, name)
        def close(self):
            pass
    base_fp = fp
    # text layer for the ASCII header; binary pixel data is written to
    # base_fp directly by ImageFile._save below
    fp = io.TextIOWrapper(NoCloseStream(fp), encoding='latin-1')
    if eps:
        #
        # write EPS header
        fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write("%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write("%%Pages: 1\n")
        fp.write("%%EndComments\n")
        fp.write("%%Page: 1 1\n")
        fp.write("%%ImageData: %d %d " % im.size)
        fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
    #
    # image header
    fp.write("gsave\n")
    fp.write("10 dict begin\n")
    fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write("%d %d scale\n" % im.size)
    fp.write("%d %d 8\n" % im.size)  # <= bits
    fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write("{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + "\n")
    # flush the text layer before raw data goes to the underlying stream
    fp.flush()
    ImageFile._save(im, base_fp, [("eps", (0,0)+im.size, 0, None)])
    fp.write("\n%%%%EndBinary\n")
    fp.write("grestore end\n")
    fp.flush()
#
# --------------------------------------------------------------------
# Register the plugin: open/save hooks, recognised file extensions and
# the MIME type for Encapsulated PostScript.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_log import log as logging
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import function
from heat.engine.notification import autoscaling as notification
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import instance_group as instgrp
from heat.engine import rsrc_defn
from heat.engine import support
from heat.scaling import cooldown
LOG = logging.getLogger(__name__)
# Adjustment types accepted by scaling operations (AWS API names).
(EXACT_CAPACITY, CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY) = (
    'ExactCapacity', 'ChangeInCapacity', 'PercentChangeInCapacity')
def _calculate_new_capacity(current, adjustment, adjustment_type,
                            min_adjustment_step, minimum, maximum):
    """
    Given the current capacity, calculates the new capacity which results
    from applying the given adjustment of the given adjustment-type. The
    new capacity will be kept within the maximum and minimum bounds.

    :param current: current capacity of the group
    :param adjustment: requested adjustment (count or percentage)
    :param adjustment_type: one of EXACT_CAPACITY, CHANGE_IN_CAPACITY or
        PERCENT_CHANGE_IN_CAPACITY
    :param min_adjustment_step: minimum number of instances a percentage
        adjustment may change the group by (may be None)
    :param minimum: lower bound for the returned capacity
    :param maximum: upper bound for the returned capacity
    """
    def _get_minimum_adjustment(adjustment, min_adjustment_step):
        # Enforce MinAdjustmentStep: never scale by fewer instances than
        # the configured step, preserving the direction of the change.
        if min_adjustment_step and min_adjustment_step > abs(adjustment):
            adjustment = (min_adjustment_step if adjustment > 0
                          else -min_adjustment_step)
        return adjustment

    if adjustment_type == CHANGE_IN_CAPACITY:
        new_capacity = current + adjustment
    elif adjustment_type == EXACT_CAPACITY:
        new_capacity = adjustment
    else:
        # PercentChangeInCapacity
        delta = current * adjustment / 100.0
        if math.fabs(delta) < 1.0:
            # changes below one instance round away from zero, so any
            # non-zero percentage has an effect
            rounded = int(math.ceil(delta) if delta > 0.0
                          else math.floor(delta))
        else:
            # larger changes truncate toward zero
            rounded = int(math.floor(delta) if delta > 0.0
                          else math.ceil(delta))
        adjustment = _get_minimum_adjustment(rounded, min_adjustment_step)
        new_capacity = current + adjustment

    if new_capacity > maximum:
        # lazy %-args: logging interpolates only if DEBUG is enabled
        LOG.debug('truncating growth to %s', maximum)
        return maximum
    if new_capacity < minimum:
        LOG.debug('truncating shrinkage to %s', minimum)
        return minimum
    return new_capacity
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    # AWS::AutoScaling::AutoScalingGroup: an instance group whose size can
    # be adjusted within [MinSize, MaxSize], subject to a cooldown period
    # between scaling actions.
    support_status = support.SupportStatus(version='2014.1')
    # Property names and their AWS template keys.
    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )
    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )
    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE
    ) = (
        'AutoScalingRollingUpdate'
    )
    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )
    ATTRIBUTES = (
        INSTANCE_LIST,
    ) = (
        'InstanceList',
    )
    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True
        ),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[
                constraints.CustomConstraint("nova.server")
            ]
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True
        ),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.')
            )
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }
    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension)."),
            type=attributes.Schema.STRING
        ),
    }
    # Schema for the AutoScalingRollingUpdate update policy.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }
    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
                                          schema=rolling_update_schema)
    }
    def handle_create(self):
        """Create the nested stack of group members after validating the
        launch configuration."""
        self.validate_launchconfig()
        return self.create_with_template(self.child_template())
    def _make_launch_config_resource(self, name, props):
        # Build an (unattached) AWS LaunchConfiguration resource object so
        # its schema can be used to resolve/validate the given properties.
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res
    def _get_conf_properties(self):
        """Return the (config resource, resolved properties) pair used to
        build group members.

        When InstanceId is set, a launch configuration is synthesized from
        the existing server's image, flavor, keypair and security groups;
        otherwise the referenced LaunchConfiguration resource is used.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()
        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            # validate() guarantees the list holds exactly one subnet
            props['SubnetId'] = vpc_zone_ids[0]
        return conf, props
    def check_create_complete(self, task):
        """Invoke the cooldown after creation succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            # stamp the cooldown with the initial capacity so the clock
            # starts from creation time
            self._cooldown_timestamp(
                "%s : %s" % (EXACT_CAPACITY, grouputils.get_size(self)))
        return done
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """
        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)
        # Resize to the desired capacity if one is set; otherwise keep the
        # current size (adjust() still re-clamps to the new min/max bounds)
        if self.properties[self.DESIRED_CAPACITY] is not None:
            self.adjust(self.properties[self.DESIRED_CAPACITY],
                        adjustment_type=EXACT_CAPACITY)
        else:
            current_capacity = grouputils.get_size(self)
            self.adjust(current_capacity, adjustment_type=EXACT_CAPACITY)
    def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """
        Adjust the size of the scaling group if the cooldown permits.

        Sends 'start', then 'end' (on success) or 'error' (on failure)
        autoscaling notifications around the resize, and stamps the
        cooldown timestamp afterwards.
        """
        if self._cooldown_inprogress():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            return
        capacity = grouputils.get_size(self)
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]
        new_capacity = _calculate_new_capacity(capacity, adjustment,
                                               adjustment_type,
                                               min_adjustment_step,
                                               lower, upper)
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  })
                    notification.send(**notif)
                except Exception:
                    # a notification failure must not mask the resize error
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        # record the action so the cooldown clock starts now
        self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
    def _tags(self):
        """Add identifying tags to all servers in the group.
        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag
def validate(self):
# check validity of group size
min_size = self.properties[self.MIN_SIZE]
max_size = self.properties[self.MAX_SIZE]
if max_size < min_size:
msg = _("MinSize can not be greater than MaxSize")
raise exception.StackValidationFailed(message=msg)
if min_size < 0:
msg = _("The size of AutoScalingGroup can not be less than zero")
raise exception.StackValidationFailed(message=msg)
if self.properties[self.DESIRED_CAPACITY] is not None:
desired_capacity = self.properties[self.DESIRED_CAPACITY]
if desired_capacity < min_size or desired_capacity > max_size:
msg = _("DesiredCapacity must be between MinSize and MaxSize")
raise exception.StackValidationFailed(message=msg)
# TODO(pasquier-s): once Neutron is able to assign subnets to
# availability zones, it will be possible to specify multiple subnets.
# For now, only one subnet can be specified. The bug #1096017 tracks
# this issue.
if (self.properties.get(self.VPCZONE_IDENTIFIER) and
len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
raise exception.NotSupported(feature=_("Anything other than one "
"VPCZoneIdentifier"))
# validate properties InstanceId and LaunchConfigurationName
# for aws auto scaling group.
# should provide just only one of
if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
instanceId = self.properties.get(self.INSTANCE_ID)
launch_config = self.properties.get(
self.LAUNCH_CONFIGURATION_NAME)
if bool(instanceId) == bool(launch_config):
msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
"must be provided.")
raise exception.StackValidationFailed(message=msg)
super(AutoScalingGroup, self).validate()
def _resolve_attribute(self, name):
'''
heat extension: "InstanceList" returns comma delimited list of server
ip addresses.
'''
if name == self.INSTANCE_LIST:
return u','.join(inst.FnGetAtt('PublicIp')
for inst in grouputils.get_members(self)) or None
def child_template(self):
if self.properties[self.DESIRED_CAPACITY]:
num_instances = self.properties[self.DESIRED_CAPACITY]
else:
num_instances = self.properties[self.MIN_SIZE]
return self._create_template(num_instances)
def resource_mapping():
    """Map the AWS resource type name to its Heat plugin class."""
    return {
        'AWS::AutoScaling::AutoScalingGroup': AutoScalingGroup,
    }
| |
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import subprocess
import textwrap
import warnings
from datetime import datetime
import argparse
from builtins import input
from collections import namedtuple
from dateutil.parser import parse as parsedate
import json
import daemon
from daemon.pidfile import TimeoutPIDLockFile
import signal
import sys
import airflow
from airflow import jobs, settings
from airflow import configuration as conf
from airflow.executors import DEFAULT_EXECUTOR
from airflow.models import DagModel, DagBag, TaskInstance, DagPickle, DagRun, Variable
from airflow.utils import db as db_utils
from airflow.utils import logging as logging_utils
from airflow.utils.state import State
from airflow.exceptions import AirflowException
# User-expanded path to the configured DAG definitions folder.
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
def sigint_handler(signal, frame):
    """Exit cleanly on SIGINT/SIGTERM (installed for foreground daemons)."""
    sys.exit(0)
def setup_logging(filename):
    """Attach a file handler to the root logger and return its stream.

    The returned stream is what daemon contexts must keep open across
    the fork (files_preserve).
    """
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(settings.SIMPLE_LOG_FORMAT))
    root_logger = logging.getLogger()
    root_logger.addHandler(file_handler)
    root_logger.setLevel(settings.LOGGING_LEVEL)
    return file_handler.stream
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
    """Return (pid, stdout, stderr, log) file locations for a daemon process.

    Any location not supplied by the caller defaults to
    ``$AIRFLOW_HOME/airflow-<process>.<ext>``.
    """
    home = os.path.expanduser(settings.AIRFLOW_HOME)

    def _default(ext):
        # Conventional per-process file name inside AIRFLOW_HOME.
        return os.path.join(home, "airflow-{}.{}".format(process, ext))

    stderr = stderr or _default("err")
    stdout = stdout or _default("out")
    log = log or _default("log")
    pid = pid or _default("pid")
    return pid, stdout, stderr, log
def process_subdir(subdir):
    """Resolve a --subdir argument to an absolute path.

    Replaces the literal "DAGS_FOLDER" token with the configured DAGs
    directory and expands "~" before making the path absolute. A falsy
    subdir is returned unchanged.
    """
    dags_folder = os.path.expanduser(conf.get("core", "DAGS_FOLDER"))
    if not subdir:
        return subdir
    if "DAGS_FOLDER" in subdir:
        subdir = subdir.replace("DAGS_FOLDER", dags_folder)
    return os.path.abspath(os.path.expanduser(subdir))
def get_dag(args):
    """Load the DAG named by args.dag_id, raising if it cannot be found."""
    dagbag = DagBag(process_subdir(args.subdir))
    try:
        return dagbag.dags[args.dag_id]
    except KeyError:
        raise AirflowException(
            'dag_id could not be found: {}'.format(args.dag_id))
def backfill(args, dag=None):
    """Run (or dry-run) a DAG over a date range, optionally filtered by
    task regex."""
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dag = dag or get_dag(args)
    if not args.start_date and not args.end_date:
        raise AirflowException("Provide a start_date and/or end_date")
    # If only one date is passed, using same as start and end
    args.end_date = args.end_date or args.start_date
    args.start_date = args.start_date or args.end_date
    if args.task_regex:
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_upstream=not args.ignore_dependencies)
    if args.dry_run:
        # Dry-run renders each task for the start date without executing.
        print("Dry run of DAG {0} on {1}".format(args.dag_id,
                                                 args.start_date))
        for task in dag.tasks:
            print("Task {0}".format(task.task_id))
            ti = TaskInstance(task, args.start_date)
            ti.dry_run()
    else:
        dag.run(
            start_date=args.start_date,
            end_date=args.end_date,
            mark_success=args.mark_success,
            include_adhoc=args.include_adhoc,
            local=args.local,
            donot_pickle=(args.donot_pickle or
                          conf.getboolean('core', 'donot_pickle')),
            ignore_dependencies=args.ignore_dependencies,
            ignore_first_depends_on_past=args.ignore_first_depends_on_past,
            pool=args.pool)
def trigger_dag(args):
    """Create a manually-triggered DagRun for args.dag_id.

    Refuses to create the run (logs an error) if a DagRun with the same
    run_id already exists.
    """
    session = settings.Session()
    # TODO: verify dag_id
    execution_date = datetime.now()
    run_id = args.run_id or "manual__{0}".format(execution_date.isoformat())
    dr = session.query(DagRun).filter(
        DagRun.dag_id == args.dag_id, DagRun.run_id == run_id).first()
    # Named run_conf (not conf) so the module-level airflow configuration
    # import is not shadowed.
    run_conf = {}
    if args.conf:
        run_conf = json.loads(args.conf)
    if dr:
        logging.error("This run_id already exists")
    else:
        trigger = DagRun(
            dag_id=args.dag_id,
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True)
        session.add(trigger)
        logging.info("Created {}".format(trigger))
    session.commit()
def variables(args):
    """Get, set, or list Airflow Variables from the command line.

    With neither --get nor --set, prints every variable key.
    """
    if args.get:
        try:
            var = Variable.get(args.get,
                               deserialize_json=args.json,
                               default_var=args.default)
            print(var)
        except ValueError as e:
            print(e)
    if args.set:
        Variable.set(args.set[0], args.set[1])
    if not args.set and not args.get:
        # list all variables
        session = settings.Session()
        # 'all_vars' rather than 'vars' to avoid shadowing the builtin.
        all_vars = session.query(Variable)
        msg = "\n".join(var.key for var in all_vars)
        print(msg)
def pause(args, dag=None):
    """Mark the given (or looked-up) DAG as paused."""
    set_is_paused(True, args, dag)
def unpause(args, dag=None):
    """Mark the given (or looked-up) DAG as unpaused."""
    set_is_paused(False, args, dag)
def set_is_paused(is_paused, args, dag=None):
    """Persist the paused flag for a DAG in the metadata database."""
    dag = dag or get_dag(args)
    session = settings.Session()
    dm = session.query(DagModel).filter(
        DagModel.dag_id == dag.dag_id).first()
    dm.is_paused = is_paused
    session.commit()
    # Report the value actually written; dag.is_paused comes from the
    # DagBag object and may be stale at this point.
    msg = "Dag: {}, paused: {}".format(dag, str(is_paused))
    print(msg)
def run(args, dag=None):
    """Run a single task instance: locally, raw, or through the executor.

    Sets up a per-task log file first; afterwards flushes/detaches the
    log handler and, when configured, ships the log to S3 or GCS.
    """
    db_utils.pessimistic_connection_handling()
    if dag:
        args.dag_id = dag.dag_id
    # Setting up logging
    log_base = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
    directory = log_base + "/{args.dag_id}/{args.task_id}".format(args=args)
    if not os.path.exists(directory):
        os.makedirs(directory)
    iso = args.execution_date.isoformat()
    filename = "{directory}/{iso}".format(**locals())
    logging.root.handlers = []
    logging.basicConfig(
        filename=filename,
        level=settings.LOGGING_LEVEL,
        format=settings.LOG_FORMAT)
    # Resolve the DAG: from the DAGs folder, or from a pickle id.
    if not args.pickle and not dag:
        dag = get_dag(args)
    elif not dag:
        session = settings.Session()
        logging.info('Loading pickle id {args.pickle}'.format(**locals()))
        dag_pickle = session.query(
            DagPickle).filter(DagPickle.id == args.pickle).first()
        if not dag_pickle:
            raise AirflowException("Who hid the pickle!? [missing pickle]")
        dag = dag_pickle.pickle
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    if args.local:
        # Supervised local run via a LocalTaskJob.
        print("Logging into: " + filename)
        run_job = jobs.LocalTaskJob(
            task_instance=ti,
            mark_success=args.mark_success,
            force=args.force,
            pickle_id=args.pickle,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            pool=args.pool)
        run_job.run()
    elif args.raw:
        # Raw run: execute the task in-process with no supervising job.
        ti.run(
            mark_success=args.mark_success,
            force=args.force,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            job_id=args.job_id,
            pool=args.pool,
        )
    else:
        # Hand the task instance to the configured executor.
        pickle_id = None
        if args.ship_dag:
            try:
                # Running remotely, so pickling the DAG
                session = settings.Session()
                pickle = DagPickle(dag)
                session.add(pickle)
                session.commit()
                pickle_id = pickle.id
                print((
                    'Pickled dag {dag} '
                    'as pickle_id:{pickle_id}').format(**locals()))
            except Exception as e:
                print('Could not pickle the DAG')
                print(e)
                raise e
        executor = DEFAULT_EXECUTOR
        executor.start()
        print("Sending to executor.")
        executor.queue_task_instance(
            ti,
            mark_success=args.mark_success,
            pickle_id=pickle_id,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            force=args.force,
            pool=args.pool)
        executor.heartbeat()
        executor.end()
    # Force the log to flush, and set the handler to go back to normal so we
    # don't continue logging to the task's log file. The flush is important
    # because we subsequently read from the log to insert into S3 or Google
    # cloud storage.
    logging.root.handlers[0].flush()
    logging.root.handlers = []
    # store logs remotely
    remote_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
    # deprecated as of March 2016
    if not remote_base and conf.get('core', 'S3_LOG_FOLDER'):
        warnings.warn(
            'The S3_LOG_FOLDER conf key has been replaced by '
            'REMOTE_BASE_LOG_FOLDER. Your conf still works but please '
            'update airflow.cfg to ensure future compatibility.',
            DeprecationWarning)
        remote_base = conf.get('core', 'S3_LOG_FOLDER')
    if os.path.exists(filename):
        # read log and remove old logs to get just the latest additions
        with open(filename, 'r') as logfile:
            log = logfile.read()
        remote_log_location = filename.replace(log_base, remote_base)
        # S3
        if remote_base.startswith('s3:/'):
            logging_utils.S3Log().write(log, remote_log_location)
        # GCS
        elif remote_base.startswith('gs:/'):
            logging_utils.GCSLog().write(
                log,
                remote_log_location,
                append=True)
        # Other
        elif remote_base and remote_base != 'None':
            logging.error(
                'Unsupported remote log location: {}'.format(remote_base))
def task_state(args):
    """
    Returns the state of a TaskInstance at the command line.

    >>> airflow task_state tutorial sleep 2015-01-01
    success
    """
    dag = get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    print(TaskInstance(task, args.execution_date).current_state())
def list_dags(args):
    """Print the id of every DAG found under the given subdir."""
    dagbag = DagBag(process_subdir(args.subdir))
    print("\n".join(sorted(dagbag.dags)))
def list_tasks(args, dag=None):
    """Print the task ids of a DAG, or its tree view with --tree."""
    dag = dag or get_dag(args)
    if args.tree:
        dag.tree_view()
    else:
        # Sort once; the original sorted the already-sorted list twice.
        print("\n".join(sorted(t.task_id for t in dag.tasks)))
def test(args, dag=None):
    """Run a single task instance in test mode (no dependency checks,
    no state recorded in the database)."""
    dag = dag or get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    # Add CLI provided task_params to task.params
    if args.task_params:
        task.params.update(json.loads(args.task_params))
    ti = TaskInstance(task, args.execution_date)
    if args.dry_run:
        ti.dry_run()
        return
    ti.run(force=True, ignore_dependencies=True, test_mode=True)
def render(args):
    """Render and print every templated field of a task instance."""
    dag = get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    ti.render_templates()
    for attr in task.__class__.template_fields:
        print(textwrap.dedent("""\
        # ----------------------------------------------------------
        # property: {}
        # ----------------------------------------------------------
        {}
        """.format(attr, getattr(task, attr))))
def clear(args):
    """Clear task instance state for a DAG, optionally restricted by task
    regex, date range, and state filters."""
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dag = get_dag(args)
    if args.task_regex:
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_downstream=args.downstream,
            include_upstream=args.upstream,
        )
    dag.clear(
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.no_confirm)
def webserver(args):
    """Start the Airflow webserver: Flask debug server with --debug,
    otherwise gunicorn."""
    print(settings.HEADER)
    from airflow.www.app import cached_app
    app = cached_app(conf)
    workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = (args.worker_timeout or
                      conf.get('webserver', 'webserver_worker_timeout'))
    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app.run(debug=True, port=args.port, host=args.hostname)
    else:
        pid, stdout, stderr, log_file = setup_locations("webserver", pid=args.pid)
        # NOTE: trailing space added inside the first fragment; the
        # original concatenation printed e.g. "syncworkers".
        print(
            'Running the Gunicorn server with {workers} {args.workerclass} '
            'workers on host {args.hostname} and port '
            '{args.port} with a timeout of {worker_timeout}...'.format(**locals()))
        sp = subprocess.Popen([
            'gunicorn', '-w', str(args.workers), '-k', str(args.workerclass),
            '-t', str(args.worker_timeout), '-b', args.hostname + ':' + str(args.port),
            '-n', 'airflow-webserver', '--pid', pid,
            'airflow.www.app:cached_app()']
        )
        if args.foreground:
            sp.wait()
def scheduler(args):
    """Start the scheduler job, daemonized unless --foreground is given."""
    print(settings.HEADER)
    job = jobs.SchedulerJob(
        dag_id=args.dag_id,
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)
    if not args.foreground:
        pid, stdout, stderr, log_file = setup_locations("scheduler", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        # Detach into a daemon; the log handler's stream is preserved
        # across the fork so logging keeps working.
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            job.run()
        stdout.close()
        stderr.close()
    else:
        # Foreground: exit cleanly on Ctrl-C / SIGTERM.
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        job.run()
def serve_logs(args):
    """Serve files below BASE_LOG_FOLDER over HTTP on the configured
    worker log server port."""
    print("Starting flask")
    import flask
    flask_app = flask.Flask(__name__)
    @flask_app.route('/log/<path:filename>')
    def serve_logs(filename):  # noqa
        # Resolve the base folder per-request and serve the file from it.
        log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
        return flask.send_from_directory(
            log,
            filename,
            mimetype="application/json",
            as_attachment=False)
    WORKER_LOG_SERVER_PORT = \
        int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
    flask_app.run(
        host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
def worker(args):
    """Start a Celery worker node plus an 'airflow serve_logs' subprocess,
    daemonized unless --foreground is given."""
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker
    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }
    if not args.foreground:
        pid, stdout, stderr, log_file = setup_locations("worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            # serve_logs runs alongside the worker so its task logs can
            # be fetched remotely; killed once the worker exits.
            sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
            worker.run(**options)
            sp.kill()
        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
        worker.run(**options)
        sp.kill()
def initdb(args):  # noqa
    """Initialize the Airflow metadata database."""
    print("DB: " + repr(settings.engine.url))
    db_utils.initdb()
    print("Done.")
def resetdb(args):
    """Drop and rebuild the metadata database after confirmation (or --yes)."""
    print("DB: " + repr(settings.engine.url))
    if args.yes or input(
            "This will drop existing tables if they exist. "
            "Proceed? (y/n)").upper() == "Y":
        logging.basicConfig(level=settings.LOGGING_LEVEL,
                            format=settings.SIMPLE_LOG_FORMAT)
        db_utils.resetdb()
    else:
        print("Bail.")
def upgradedb(args):  # noqa
    """Upgrade the metadata database schema to the latest version."""
    print("DB: " + repr(settings.engine.url))
    db_utils.upgradedb()
def version(args):  # noqa
    """Print the Airflow header and version string."""
    print(settings.HEADER + " v" + airflow.__version__)
def flower(args):
    """Start Celery Flower, daemonized unless --foreground is given."""
    broka = conf.get('celery', 'BROKER_URL')
    # args.port is parsed with type=int (see the 'flower_port' Arg), so it
    # must be coerced to str; the original 'str + int' raised TypeError.
    port = '--port=' + str(args.port)
    api = ''
    if args.broker_api:
        api = '--broker_api=' + args.broker_api
    if not args.foreground:
        pid, stdout, stderr, log_file = setup_locations("flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            sp = subprocess.Popen(['flower', '-b', broka, port, api])
            sp.wait()
        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        sp = subprocess.Popen(['flower', '-b', broka, port, api])
        sp.wait()
def kerberos(args):  # noqa
    """Start the kerberos ticket renewer, daemonized unless --foreground."""
    print(settings.HEADER)
    import airflow.security.kerberos
    if not args.foreground:
        pid, stdout, stderr, log_file = setup_locations("kerberos", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            airflow.security.kerberos.run()
        stdout.close()
        stderr.close()
    else:
        airflow.security.kerberos.run()
# Lightweight description of an argparse argument; every field after
# 'flags' defaults to None so each CLI arg only specifies what it needs.
Arg = namedtuple(
    'Arg', ['flags', 'help', 'action', 'default', 'nargs', 'type', 'choices', 'metavar'])
Arg.__new__.__defaults__ = (None, None, None, None, None, None, None)
class CLIFactory(object):
args = {
# Shared
'dag_id': Arg(("dag_id",), "The id of the dag"),
'task_id': Arg(("task_id",), "The id of the task"),
'execution_date': Arg(
("execution_date",), help="The execution date of the DAG",
type=parsedate),
'task_regex': Arg(
("-t", "--task_regex"),
"The regex to filter specific task_ids to backfill (optional)"),
'subdir': Arg(
("-sd", "--subdir"),
"File location or directory from which to look for the dag",
default=DAGS_FOLDER),
'start_date': Arg(
("-s", "--start_date"), "Override start_date YYYY-MM-DD",
type=parsedate),
'end_date': Arg(
("-e", "--end_date"), "Override end_date YYYY-MM-DD",
type=parsedate),
'dry_run': Arg(
("-dr", "--dry_run"), "Perform a dry run", "store_true"),
'pid': Arg(
("--pid", ), "PID file location",
nargs='?'),
'foreground': Arg(
("-f", "--foreground"), "Do not detach. Run in foreground", "store_true"),
'stderr': Arg(
("--stderr", ), "Redirect stderr to this file"),
'stdout': Arg(
("--stdout", ), "Redirect stdout to this file"),
'log_file': Arg(
("-l", "--log-file"), "Location of the log file"),
# backfill
'mark_success': Arg(
("-m", "--mark_success"),
"Mark jobs as succeeded without running them", "store_true"),
'local': Arg(
("-l", "--local"),
"Run the task using the LocalExecutor", "store_true"),
'donot_pickle': Arg(
("-x", "--donot_pickle"), (
"Do not attempt to pickle the DAG object to send over "
"to the workers, just tell the workers to run their version "
"of the code."),
"store_true"),
'include_adhoc': Arg(
("-a", "--include_adhoc"),
"Include dags with the adhoc parameter.", "store_true"),
'bf_ignore_dependencies': Arg(
("-i", "--ignore_dependencies"),
(
"Skip upstream tasks, run only the tasks "
"matching the regexp. Only works in conjunction "
"with task_regex"),
"store_true"),
'bf_ignore_first_depends_on_past': Arg(
("-I", "--ignore_first_depends_on_past"),
(
"Ignores depends_on_past dependencies for the first "
"set of tasks only (subsequent executions in the backfill "
"DO respect depends_on_past)."),
"store_true"),
'pool': Arg(("--pool",), "Resource pool to use"),
# list_dags
'tree': Arg(("-t", "--tree"), "Tree view", "store_true"),
# clear
'upstream': Arg(
("-u", "--upstream"), "Include upstream tasks", "store_true"),
'only_failed': Arg(
("-f", "--only_failed"), "Only failed jobs", "store_true"),
'only_running': Arg(
("-r", "--only_running"), "Only running jobs", "store_true"),
'downstream': Arg(
("-d", "--downstream"), "Include downstream tasks", "store_true"),
'no_confirm': Arg(
("-c", "--no_confirm"),
"Do not request confirmation", "store_true"),
# trigger_dag
'run_id': Arg(("-r", "--run_id"), "Helps to indentify this run"),
'conf': Arg(
('-c', '--conf'),
"json string that gets pickled into the DagRun's conf attribute"),
# variables
'set': Arg(
("-s", "--set"),
nargs=2,
metavar=('KEY', 'VAL'),
help="Set a variable"),
'get': Arg(
("-g", "--get"),
metavar='KEY',
help="Get value of a variable"),
'default': Arg(
("-d", "--default"),
metavar="VAL",
default=None,
help="Default value returned if variable does not exist"),
'json': Arg(
("-j", "--json"),
help="Deserialize JSON variable",
action="store_true"),
# kerberos
'principal': Arg(
("principal",), "kerberos principal",
nargs='?', default=conf.get('kerberos', 'principal')),
'keytab': Arg(
("-kt", "--keytab"), "keytab",
nargs='?', default=conf.get('kerberos', 'keytab')),
# run
'force': Arg(
("-f", "--force"),
"Force a run regardless or previous success", "store_true"),
'raw': Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true"),
'ignore_dependencies': Arg(
("-i", "--ignore_dependencies"),
"Ignore upstream and depends_on_past dependencies", "store_true"),
'ignore_depends_on_past': Arg(
("-I", "--ignore_depends_on_past"),
"Ignore depends_on_past dependencies (but respect "
"upstream dependencies)",
"store_true"),
'ship_dag': Arg(
("--ship_dag",),
"Pickles (serializes) the DAG and ships it to the worker",
"store_true"),
'pickle': Arg(
("-p", "--pickle"),
"Serialized pickle object of the entire dag (used internally)"),
'job_id': Arg(("-j", "--job_id"), argparse.SUPPRESS),
# webserver
'port': Arg(
("-p", "--port"),
default=conf.get('webserver', 'WEB_SERVER_PORT'),
type=int,
help="The port on which to run the server"),
'workers': Arg(
("-w", "--workers"),
default=conf.get('webserver', 'WORKERS'),
type=int,
help="Number of workers to run the webserver on"),
'workerclass': Arg(
("-k", "--workerclass"),
default=conf.get('webserver', 'WORKER_CLASS'),
choices=['sync', 'eventlet', 'gevent', 'tornado'],
help="The worker class to use for gunicorn"),
'worker_timeout': Arg(
("-t", "--worker_timeout"),
default=conf.get('webserver', 'WEB_SERVER_WORKER_TIMEOUT'),
type=int,
help="The timeout for waiting on webserver workers"),
'hostname': Arg(
("-hn", "--hostname"),
default=conf.get('webserver', 'WEB_SERVER_HOST'),
help="Set the hostname on which to run the web server"),
'debug': Arg(
("-d", "--debug"),
"Use the server that ships with Flask in debug mode",
"store_true"),
# resetdb
'yes': Arg(
("-y", "--yes"),
"Do not prompt to confirm reset. Use with care!",
"store_true",
default=False),
# scheduler
'dag_id_opt': Arg(("-d", "--dag_id"), help="The id of the dag to run"),
'num_runs': Arg(
("-n", "--num_runs"),
default=None, type=int,
help="Set the number of runs to execute before exiting"),
# worker
'do_pickle': Arg(
("-p", "--do_pickle"),
default=False,
help=(
"Attempt to pickle the DAG object to send over "
"to the workers, instead of letting workers run their version "
"of the code."),
action="store_true"),
'queues': Arg(
("-q", "--queues"),
help="Comma delimited list of queues to serve",
default=conf.get('celery', 'DEFAULT_QUEUE')),
'concurrency': Arg(
("-c", "--concurrency"),
type=int,
help="The number of worker processes",
default=conf.get('celery', 'celeryd_concurrency')),
# flower
'broker_api': Arg(("-a", "--broker_api"), help="Broker api"),
'flower_port': Arg(
("-p", "--port"),
default=conf.get('celery', 'FLOWER_PORT'),
type=int,
help="The port on which to run the server"),
'task_params': Arg(
("-tp", "--task_params"),
help="Sends a JSON params dict to the task"),
}
subparsers = (
{
'func': backfill,
'help': "Run subsections of a DAG for a specified date range",
'args': (
'dag_id', 'task_regex', 'start_date', 'end_date',
'mark_success', 'local', 'donot_pickle', 'include_adhoc',
'bf_ignore_dependencies', 'bf_ignore_first_depends_on_past',
'subdir', 'pool', 'dry_run')
}, {
'func': list_tasks,
'help': "List the tasks within a DAG",
'args': ('dag_id', 'tree', 'subdir'),
}, {
'func': clear,
'help': "Clear a set of task instance, as if they never ran",
'args': (
'dag_id', 'task_regex', 'start_date', 'end_date', 'subdir',
'upstream', 'downstream', 'no_confirm', 'only_failed',
'only_running'),
}, {
'func': pause,
'help': "Pause a DAG",
'args': ('dag_id', 'subdir'),
}, {
'func': unpause,
'help': "Pause a DAG",
'args': ('dag_id', 'subdir'),
}, {
'func': trigger_dag,
'help': "Trigger a DAG run",
'args': ('dag_id', 'subdir', 'run_id', 'conf'),
}, {
'func': variables,
'help': "List all variables",
"args": ('set', 'get', 'json', 'default'),
}, {
'func': kerberos,
'help': "Start a kerberos ticket renewer",
'args': ('principal', 'keytab', 'pid',
'foreground', 'stdout', 'stderr', 'log_file'),
}, {
'func': render,
'help': "Render a task instance's template(s)",
'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
}, {
'func': run,
'help': "Run a single task instance",
'args': (
'dag_id', 'task_id', 'execution_date', 'subdir',
'mark_success', 'force', 'pool',
'local', 'raw', 'ignore_dependencies',
'ignore_depends_on_past', 'ship_dag', 'pickle', 'job_id'),
}, {
'func': initdb,
'help': "Initialize the metadata database",
'args': tuple(),
}, {
'func': list_dags,
'help': "List all the DAGs",
'args': ('subdir',),
}, {
'func': task_state,
'help': "Get the status of a task instance",
'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
}, {
'func': serve_logs,
'help': "Serve logs generate by worker",
'args': tuple(),
}, {
'func': test,
'help': (
"Test a task instance. This will run a task without checking for "
"dependencies or recording it's state in the database."),
'args': (
'dag_id', 'task_id', 'execution_date', 'subdir', 'dry_run',
'task_params'),
}, {
'func': webserver,
'help': "Start a Airflow webserver instance",
'args': ('port', 'workers', 'workerclass', 'worker_timeout', 'hostname',
'pid', 'foreground', 'stdout', 'stderr', 'log_file',
'debug'),
}, {
'func': resetdb,
'help': "Burn down and rebuild the metadata database",
'args': ('yes',),
}, {
'func': upgradedb,
'help': "Upgrade metadata database to latest version",
'args': tuple(),
}, {
'func': scheduler,
'help': "Start a scheduler scheduler instance",
'args': ('dag_id_opt', 'subdir', 'num_runs', 'do_pickle',
'pid', 'foreground', 'stdout', 'stderr', 'log_file'),
}, {
'func': worker,
'help': "Start a Celery worker node",
'args': ('do_pickle', 'queues', 'concurrency',
'pid', 'foreground', 'stdout', 'stderr', 'log_file'),
}, {
'func': flower,
'help': "Start a Celery Flower",
'args': ('flower_port', 'broker_api',
'pid', 'foreground', 'stdout', 'stderr', 'log_file'),
}, {
'func': version,
'help': "Show the version",
'args': tuple(),
},
)
subparsers_dict = {sp['func'].__name__: sp for sp in subparsers}
dag_subparsers = (
'list_tasks', 'backfill', 'test', 'run', 'pause', 'unpause')
    @classmethod
    def get_parser(cls, dag_parser=False):
        """Build the argparse parser for the CLI from the declarative tables.

        When dag_parser is True, only the subcommands in dag_subparsers are
        included and any argument whose name contains 'dag_id' is dropped
        (the DAG is implied by context).
        """
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(
            help='sub-command help', dest='subcommand')
        subparsers.required = True
        subparser_list = cls.dag_subparsers if dag_parser else cls.subparsers_dict.keys()
        for sub in subparser_list:
            sub = cls.subparsers_dict[sub]
            sp = subparsers.add_parser(sub['func'].__name__, help=sub['help'])
            for arg in sub['args']:
                if 'dag_id' in arg and dag_parser:
                    continue
                arg = cls.args[arg]
                # Pass only the fields this Arg actually set; argparse
                # rejects explicit None for several keywords.
                kwargs = {
                    f: getattr(arg, f)
                    for f in arg._fields if f != 'flags' and getattr(arg, f)}
                sp.add_argument(*arg.flags, **kwargs)
            sp.set_defaults(func=sub['func'])
        return parser
| |
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_log import log
import six
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LW
from ceilometer import sample
LOG = log.getLogger(__name__)
# Aggregate cumulative disk I/O counters for one instance; the
# per_disk_requests field maps stat name -> {device: value}.
DiskIOData = collections.namedtuple(
    'DiskIOData',
    'r_bytes r_requests w_bytes w_requests per_disk_requests',
)
# Disk I/O rates, with a per-device breakdown in per_disk_rate.
DiskRateData = collections.namedtuple('DiskRateData',
                                      ['read_bytes_rate',
                                       'read_requests_rate',
                                       'write_bytes_rate',
                                       'write_requests_rate',
                                       'per_disk_rate'])
# Disk latency figures, with a per-device breakdown.
DiskLatencyData = collections.namedtuple('DiskLatencyData',
                                         ['disk_latency',
                                          'per_disk_latency'])
# IOPS counts, with a per-device breakdown.
DiskIOPSData = collections.namedtuple('DiskIOPSData',
                                      ['iops_count',
                                       'per_disk_iops'])
# Disk capacity/allocation/physical figures, with a per-device breakdown.
DiskInfoData = collections.namedtuple('DiskInfoData',
                                      ['capacity',
                                       'allocation',
                                       'physical',
                                       'per_disk_info'])
@six.add_metaclass(abc.ABCMeta)
class _Base(pollsters.BaseComputePollster):
    """Base class for the disk I/O pollsters.

    Inspects each instance's disks once per polling cycle, caches the
    aggregated counters under CACHE_KEY_DISK, and delegates sample
    construction to the subclass's _get_samples().
    """
    # Debug-log template for the raw per-disk counters.
    DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:",
                                     "%s %s:",
                                     "read-requests=%d",
                                     "read-bytes=%d",
                                     "write-requests=%d",
                                     "write-bytes=%d",
                                     "errors=%d",
                                     ])
    CACHE_KEY_DISK = 'diskio'
    def _populate_cache(self, inspector, cache, instance):
        """Return the DiskIOData for instance, computing it at most once
        per polling cycle via the shared cache.

        Totals are summed over all of the instance's disks; per-device
        values are kept in the per_disk_requests mapping.
        """
        i_cache = cache.setdefault(self.CACHE_KEY_DISK, {})
        if instance.id not in i_cache:
            r_bytes = 0
            r_requests = 0
            w_bytes = 0
            w_requests = 0
            per_device_read_bytes = {}
            per_device_read_requests = {}
            per_device_write_bytes = {}
            per_device_write_requests = {}
            for disk, info in inspector.inspect_disks(instance):
                LOG.debug(self.DISKIO_USAGE_MESSAGE,
                          instance, disk.device, info.read_requests,
                          info.read_bytes, info.write_requests,
                          info.write_bytes, info.errors)
                r_bytes += info.read_bytes
                r_requests += info.read_requests
                w_bytes += info.write_bytes
                w_requests += info.write_requests
                # per disk data
                per_device_read_bytes[disk.device] = info.read_bytes
                per_device_read_requests[disk.device] = info.read_requests
                per_device_write_bytes[disk.device] = info.write_bytes
                per_device_write_requests[disk.device] = info.write_requests
            per_device_requests = {
                'read_bytes': per_device_read_bytes,
                'read_requests': per_device_read_requests,
                'write_bytes': per_device_write_bytes,
                'write_requests': per_device_write_requests,
            }
            i_cache[instance.id] = DiskIOData(
                r_bytes=r_bytes,
                r_requests=r_requests,
                w_bytes=w_bytes,
                w_requests=w_requests,
                per_disk_requests=per_device_requests,
            )
        return i_cache[instance.id]
    @abc.abstractmethod
    def _get_samples(instance, c_data):
        """Return one or more Sample."""
    def get_samples(self, manager, cache, resources):
        """Yield samples for every pollable instance.

        Instances that disappear, are shut off, or otherwise fail mid-poll
        are logged and skipped rather than aborting the whole cycle.
        """
        for instance in resources:
            instance_name = util.instance_name(instance)
            try:
                c_data = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for s in self._get_samples(instance, c_data):
                    yield s
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug(_('Exception while getting samples %s'), err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                             'getting samples of %(pollster)s: %(exc)s'),
                         {'instance_id': instance.id,
                          'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            ' %(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})
class ReadRequestsPollster(_Base):
    @staticmethod
    def _get_samples(instance, c_data):
        """Emit one cumulative sample of total read requests."""
        devices = c_data.per_disk_requests['read_requests'].keys()
        total_sample = util.make_sample_from_instance(
            instance,
            name='disk.read.requests',
            type=sample.TYPE_CUMULATIVE,
            unit='request',
            volume=c_data.r_requests,
            additional_metadata={'device': devices},
        )
        return [total_sample]
class PerDeviceReadRequestsPollster(_Base):
    @staticmethod
    def _get_samples(instance, c_data):
        """Emit one cumulative read-requests sample per disk device."""
        per_device = c_data.per_disk_requests['read_requests']
        return [util.make_sample_from_instance(
                    instance,
                    name='disk.device.read.requests',
                    type=sample.TYPE_CUMULATIVE,
                    unit='request',
                    volume=value,
                    resource_id="%s-%s" % (instance.id, device),
                )
                for device, value in six.iteritems(per_device)]
class ReadBytesPollster(_Base):
    @staticmethod
    def _get_samples(instance, c_data):
        """Emit one cumulative sample of total bytes read."""
        devices = c_data.per_disk_requests['read_bytes'].keys()
        total_sample = util.make_sample_from_instance(
            instance,
            name='disk.read.bytes',
            type=sample.TYPE_CUMULATIVE,
            unit='B',
            volume=c_data.r_bytes,
            additional_metadata={'device': devices},
        )
        return [total_sample]
class PerDeviceReadBytesPollster(_Base):
    """One cumulative sample per disk device for bytes read."""

    @staticmethod
    def _get_samples(instance, c_data):
        per_device = c_data.per_disk_requests['read_bytes']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.read.bytes',
                type=sample.TYPE_CUMULATIVE,
                unit='B',
                volume=nbytes,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, nbytes in six.iteritems(per_device)
        ]
class WriteRequestsPollster(_Base):
    """Single cumulative sample: total write requests across all disks."""

    @staticmethod
    def _get_samples(instance, c_data):
        devices = c_data.per_disk_requests['write_requests'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.write.requests',
            type=sample.TYPE_CUMULATIVE,
            unit='request',
            volume=c_data.w_requests,
            additional_metadata={'device': devices},
        )]
class PerDeviceWriteRequestsPollster(_Base):
    """One cumulative sample per disk device for write requests."""

    @staticmethod
    def _get_samples(instance, c_data):
        per_device = c_data.per_disk_requests['write_requests']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.write.requests',
                type=sample.TYPE_CUMULATIVE,
                unit='request',
                volume=count,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, count in six.iteritems(per_device)
        ]
class WriteBytesPollster(_Base):
    """Single cumulative sample: total bytes written across all disks."""

    @staticmethod
    def _get_samples(instance, c_data):
        devices = c_data.per_disk_requests['write_bytes'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.write.bytes',
            type=sample.TYPE_CUMULATIVE,
            unit='B',
            volume=c_data.w_bytes,
            additional_metadata={'device': devices},
        )]
class PerDeviceWriteBytesPollster(_Base):
    """One cumulative sample per disk device for bytes written."""

    @staticmethod
    def _get_samples(instance, c_data):
        per_device = c_data.per_disk_requests['write_bytes']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.write.bytes',
                type=sample.TYPE_CUMULATIVE,
                unit='B',
                volume=nbytes,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, nbytes in six.iteritems(per_device)
        ]
@six.add_metaclass(abc.ABCMeta)
class _DiskRatesPollsterBase(pollsters.BaseComputePollster):
    """Shared inspection/caching logic for disk I/O rate pollsters."""

    # One cache entry per polling cycle, shared by all rate subclasses
    # so each instance is only inspected once.
    CACHE_KEY_DISK_RATE = 'diskio-rate'

    def _populate_cache(self, inspector, cache, instance):
        """Inspect *instance* once and cache aggregate + per-device rates."""
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {})
        if instance.id not in i_cache:
            r_bytes_rate = 0
            r_requests_rate = 0
            w_bytes_rate = 0
            w_requests_rate = 0
            per_disk_r_bytes_rate = {}
            per_disk_r_requests_rate = {}
            per_disk_w_bytes_rate = {}
            per_disk_w_requests_rate = {}
            # _inspection_duration is set by get_samples() before the
            # instance loop starts.
            disk_rates = inspector.inspect_disk_rates(
                instance, self._inspection_duration)
            for disk, info in disk_rates:
                r_bytes_rate += info.read_bytes_rate
                r_requests_rate += info.read_requests_rate
                w_bytes_rate += info.write_bytes_rate
                w_requests_rate += info.write_requests_rate
                per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate
                per_disk_r_requests_rate[disk.device] = info.read_requests_rate
                per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate
                per_disk_w_requests_rate[disk.device] = (
                    info.write_requests_rate)
            per_disk_rate = {
                'read_bytes_rate': per_disk_r_bytes_rate,
                'read_requests_rate': per_disk_r_requests_rate,
                'write_bytes_rate': per_disk_w_bytes_rate,
                'write_requests_rate': per_disk_w_requests_rate,
            }
            i_cache[instance.id] = DiskRateData(
                r_bytes_rate,
                r_requests_rate,
                w_bytes_rate,
                w_requests_rate,
                per_disk_rate
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        """Yield rate samples per instance; failures skip that instance."""
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            try:
                disk_rates_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_rate in self._get_samples(instance, disk_rates_info):
                    yield disk_rate
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug(_('Exception while getting samples %s'), err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            ' %(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})
class ReadBytesRatePollster(_DiskRatesPollsterBase):
    """Single gauge sample: aggregate read bytes/sec across all disks."""

    def _get_samples(self, instance, disk_rates_info):
        devices = disk_rates_info.per_disk_rate['read_bytes_rate'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.read.bytes.rate',
            type=sample.TYPE_GAUGE,
            unit='B/s',
            volume=disk_rates_info.read_bytes_rate,
            additional_metadata={'device': devices},
        )]
class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase):
    """One gauge sample per disk device for read bytes/sec."""

    def _get_samples(self, instance, disk_rates_info):
        per_device = disk_rates_info.per_disk_rate['read_bytes_rate']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.read.bytes.rate',
                type=sample.TYPE_GAUGE,
                unit='B/s',
                volume=rate,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, rate in six.iteritems(per_device)
        ]
class ReadRequestsRatePollster(_DiskRatesPollsterBase):
    """Single gauge sample: aggregate read requests/sec across all disks."""

    def _get_samples(self, instance, disk_rates_info):
        devices = disk_rates_info.per_disk_rate['read_requests_rate'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.read.requests.rate',
            type=sample.TYPE_GAUGE,
            unit='requests/s',
            volume=disk_rates_info.read_requests_rate,
            additional_metadata={'device': devices},
        )]
class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase):
    """One gauge sample per disk device for read requests/sec."""

    def _get_samples(self, instance, disk_rates_info):
        per_device = disk_rates_info.per_disk_rate['read_requests_rate']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.read.requests.rate',
                type=sample.TYPE_GAUGE,
                unit='requests/s',
                volume=rate,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, rate in six.iteritems(per_device)
        ]
class WriteBytesRatePollster(_DiskRatesPollsterBase):
    """Single gauge sample: aggregate write bytes/sec across all disks."""

    def _get_samples(self, instance, disk_rates_info):
        devices = disk_rates_info.per_disk_rate['write_bytes_rate'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.write.bytes.rate',
            type=sample.TYPE_GAUGE,
            unit='B/s',
            volume=disk_rates_info.write_bytes_rate,
            additional_metadata={'device': devices},
        )]
class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase):
    """One gauge sample per disk device for write bytes/sec."""

    def _get_samples(self, instance, disk_rates_info):
        per_device = disk_rates_info.per_disk_rate['write_bytes_rate']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.write.bytes.rate',
                type=sample.TYPE_GAUGE,
                unit='B/s',
                volume=rate,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, rate in six.iteritems(per_device)
        ]
class WriteRequestsRatePollster(_DiskRatesPollsterBase):
    """Single gauge sample: aggregate write requests/sec across all disks."""

    def _get_samples(self, instance, disk_rates_info):
        devices = disk_rates_info.per_disk_rate['write_requests_rate'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.write.requests.rate',
            type=sample.TYPE_GAUGE,
            unit='requests/s',
            volume=disk_rates_info.write_requests_rate,
            additional_metadata={'device': devices},
        )]
class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase):
    """One gauge sample per disk device for write requests/sec."""

    def _get_samples(self, instance, disk_rates_info):
        per_device = disk_rates_info.per_disk_rate['write_requests_rate']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.write.requests.rate',
                type=sample.TYPE_GAUGE,
                unit='requests/s',
                volume=rate,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, rate in six.iteritems(per_device)
        ]
@six.add_metaclass(abc.ABCMeta)
class _DiskLatencyPollsterBase(pollsters.BaseComputePollster):
    """Shared inspection/caching logic for disk latency pollsters."""

    # One cache entry per polling cycle, shared by latency subclasses.
    CACHE_KEY_DISK_LATENCY = 'disk-latency'

    def _populate_cache(self, inspector, cache, instance):
        """Inspect *instance* once and cache total + per-device latency."""
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {})
        if instance.id not in i_cache:
            latency = 0
            per_device_latency = {}
            disk_rates = inspector.inspect_disk_latency(instance)
            for disk, stats in disk_rates:
                latency += stats.disk_latency
                per_device_latency[disk.device] = (
                    stats.disk_latency)
            per_disk_latency = {
                'disk_latency': per_device_latency
            }
            i_cache[instance.id] = DiskLatencyData(
                latency,
                per_disk_latency
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        """Yield latency samples per instance; failures skip that instance."""
        for instance in resources:
            try:
                disk_latency_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_latency in self._get_samples(instance,
                                                      disk_latency_info):
                    yield disk_latency
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug(_('Exception while getting samples %s'), err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            ' %(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})
class DiskLatencyPollster(_DiskLatencyPollsterBase):
    """Single gauge sample: aggregate disk latency, scaled to 'ms'."""

    def _get_samples(self, instance, disk_latency_info):
        # Inspector value divided by 1000 to match the 'ms' unit.
        latency_ms = disk_latency_info.disk_latency / 1000
        return [util.make_sample_from_instance(
            instance,
            name='disk.latency',
            type=sample.TYPE_GAUGE,
            unit='ms',
            volume=latency_ms
        )]
class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase):
    """One gauge sample per disk device for latency, scaled to 'ms'."""

    def _get_samples(self, instance, disk_latency_info):
        per_device = disk_latency_info.per_disk_latency['disk_latency']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.latency',
                type=sample.TYPE_GAUGE,
                unit='ms',
                volume=latency / 1000,
                resource_id="%s-%s" % (instance.id, device)
            )
            for device, latency in six.iteritems(per_device)
        ]
@six.add_metaclass(abc.ABCMeta)
class _DiskIOPSPollsterBase(pollsters.BaseComputePollster):
    """Shared inspection/caching logic for disk IOPS pollsters.

    Fix: every sibling base class (_DiskRatesPollsterBase,
    _DiskLatencyPollsterBase, _DiskInfoPollsterBase) is declared with
    @six.add_metaclass(abc.ABCMeta); this one was missing it, so the
    @abc.abstractmethod below was never enforced.
    """

    # One cache entry per polling cycle, shared by IOPS subclasses.
    CACHE_KEY_DISK_IOPS = 'disk-iops'

    def _populate_cache(self, inspector, cache, instance):
        """Inspect *instance* once and cache total + per-device IOPS."""
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_IOPS, {})
        if instance.id not in i_cache:
            iops = 0
            per_device_iops = {}
            disk_iops_count = inspector.inspect_disk_iops(instance)
            for disk, stats in disk_iops_count:
                iops += stats.iops_count
                per_device_iops[disk.device] = stats.iops_count
            per_disk_iops = {
                'iops_count': per_device_iops
            }
            i_cache[instance.id] = DiskIOPSData(
                iops,
                per_disk_iops
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        """Yield IOPS samples per instance; failures skip that instance."""
        for instance in resources:
            try:
                disk_iops_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_iops in self._get_samples(instance,
                                                   disk_iops_info):
                    yield disk_iops
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug(_('Exception while getting samples %s'), err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            '%(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})
class DiskIOPSPollster(_DiskIOPSPollsterBase):
    """Single gauge sample: aggregate IOPS across all disks."""

    def _get_samples(self, instance, disk_iops_info):
        total_iops = disk_iops_info.iops_count
        return [util.make_sample_from_instance(
            instance,
            name='disk.iops',
            type=sample.TYPE_GAUGE,
            unit='count/s',
            volume=total_iops
        )]
class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase):
    """One gauge sample per disk device for IOPS."""

    def _get_samples(self, instance, disk_iops_info):
        per_device = disk_iops_info.per_disk_iops['iops_count']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.iops',
                type=sample.TYPE_GAUGE,
                unit='count/s',
                volume=iops,
                resource_id="%s-%s" % (instance.id, device)
            )
            for device, iops in six.iteritems(per_device)
        ]
@six.add_metaclass(abc.ABCMeta)
class _DiskInfoPollsterBase(pollsters.BaseComputePollster):
    """Shared inspection/caching logic for disk size/usage pollsters."""

    # One cache entry per polling cycle, shared by info subclasses.
    CACHE_KEY_DISK_INFO = 'diskinfo'

    def _populate_cache(self, inspector, cache, instance):
        """Inspect *instance* once and cache totals + per-device stats."""
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {})
        if instance.id not in i_cache:
            all_capacity = 0
            all_allocation = 0
            all_physical = 0
            per_disk_capacity = {}
            per_disk_allocation = {}
            per_disk_physical = {}
            disk_info = inspector.inspect_disk_info(
                instance)
            for disk, info in disk_info:
                all_capacity += info.capacity
                all_allocation += info.allocation
                all_physical += info.physical
                per_disk_capacity[disk.device] = info.capacity
                per_disk_allocation[disk.device] = info.allocation
                per_disk_physical[disk.device] = info.physical
            per_disk_info = {
                'capacity': per_disk_capacity,
                'allocation': per_disk_allocation,
                'physical': per_disk_physical,
            }
            i_cache[instance.id] = DiskInfoData(
                all_capacity,
                all_allocation,
                all_physical,
                per_disk_info
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        """Yield disk info samples per instance; failures skip that one."""
        for instance in resources:
            try:
                disk_size_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_info in self._get_samples(instance, disk_size_info):
                    yield disk_info
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug(_('Exception while getting samples %s'), err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                             'getting samples of %(pollster)s: %(exc)s'),
                         {'instance_id': instance.id,
                          'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                # Fix: pass the args dict to the logger (lazy formatting)
                # instead of wrapping it in stray parentheses.
                LOG.debug(_('%(inspector)s does not provide data for '
                            ' %(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                # Fix: eager %-interpolation replaced with lazy logger
                # arguments, consistent with the sibling base classes.
                LOG.exception(_('Ignoring instance %(name)s '
                                '(%(instance_id)s) : %(error)s'),
                              {'name': instance_name,
                               'instance_id': instance.id,
                               'error': err})
class CapacityPollster(_DiskInfoPollsterBase):
    """Single gauge sample: total disk capacity across all disks."""

    def _get_samples(self, instance, disk_info):
        devices = disk_info.per_disk_info['capacity'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.capacity',
            type=sample.TYPE_GAUGE,
            unit='B',
            volume=disk_info.capacity,
            additional_metadata={'device': devices},
        )]
class PerDeviceCapacityPollster(_DiskInfoPollsterBase):
    """One gauge sample per disk device for capacity."""

    def _get_samples(self, instance, disk_info):
        per_device = disk_info.per_disk_info['capacity']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.capacity',
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=capacity,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, capacity in six.iteritems(per_device)
        ]
class AllocationPollster(_DiskInfoPollsterBase):
    """Single gauge sample: total disk allocation across all disks."""

    def _get_samples(self, instance, disk_info):
        devices = disk_info.per_disk_info['allocation'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.allocation',
            type=sample.TYPE_GAUGE,
            unit='B',
            volume=disk_info.allocation,
            additional_metadata={'device': devices},
        )]
class PerDeviceAllocationPollster(_DiskInfoPollsterBase):
    """One gauge sample per disk device for allocation."""

    def _get_samples(self, instance, disk_info):
        per_device = disk_info.per_disk_info['allocation']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.allocation',
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=allocation,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, allocation in six.iteritems(per_device)
        ]
class PhysicalPollster(_DiskInfoPollsterBase):
    """Single gauge sample named 'disk.usage': total physical bytes."""

    def _get_samples(self, instance, disk_info):
        devices = disk_info.per_disk_info['physical'].keys()
        return [util.make_sample_from_instance(
            instance,
            name='disk.usage',
            type=sample.TYPE_GAUGE,
            unit='B',
            volume=disk_info.physical,
            additional_metadata={'device': devices},
        )]
class PerDevicePhysicalPollster(_DiskInfoPollsterBase):
    """One gauge sample per disk device for physical usage."""

    def _get_samples(self, instance, disk_info):
        per_device = disk_info.per_disk_info['physical']
        return [
            util.make_sample_from_instance(
                instance,
                name='disk.device.usage',
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=physical,
                resource_id="%s-%s" % (instance.id, device),
            )
            for device, physical in six.iteritems(per_device)
        ]
| |
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
TEST BUILD & RUN
"""
import sys
import os
import json
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.test_api import test_path_to_name, find_tests, print_tests, build_tests, test_spec_from_test_builds
from tools.options import get_default_options_parser
from tools.build_api import build_project, build_library
from tools.targets import TARGET_MAP
from tools.utils import mkdir
from tools.test_exporters import ReportExporter, ResultExporterType
# Command-line driver: discover tests under --paths, optionally filter by
# name, then either list them or build the library plus all tests and emit
# the optional Greentea test spec / JUnit report.  Python 2 only.
if __name__ == '__main__':
    try:
        # Parse Options
        parser = get_default_options_parser()
        parser.add_option("-D", "",
                          action="append",
                          dest="macros",
                          help="Add a macro definition")
        parser.add_option("-j", "--jobs",
                          type="int",
                          dest="jobs",
                          default=0,
                          help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
        parser.add_option("--source", dest="source_dir",
                          default=None, help="The source (input) directory (for sources other than tests). Defaults to current directory.", action="append")
        parser.add_option("--build", dest="build_dir",
                          default=None, help="The build (output) directory")
        parser.add_option("-l", "--list", action="store_true", dest="list",
                          default=False, help="List (recursively) available tests in order and exit")
        parser.add_option("-p", "--paths", dest="paths",
                          default=None, help="Limit the tests to those within the specified comma separated list of paths")
        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
        parser.add_option("-f", "--format", type="choice", dest="format",
                          choices=format_choices, default=format_default_choice, help=format_help)
        parser.add_option("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
                          default=None, help="Continue trying to build all tests if a build failure occurs")
        parser.add_option("-n", "--names", dest="names",
                          default=None, help="Limit the tests to a comma separated list of names")
        parser.add_option("--test-spec", dest="test_spec",
                          default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool")
        parser.add_option("--build-report-junit", dest="build_report_junit",
                          default=None, help="Destination path for a build report in the JUnit xml format")
        parser.add_option("-v", "--verbose",
                          action="store_true",
                          dest="verbose",
                          default=False,
                          help="Verbose diagnostic output")
        (options, args) = parser.parse_args()
        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths.split(",")
        else:
            all_paths = ["."]
        all_tests = {}
        tests = {}
        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path))
        # Filter tests by name if specified
        if options.names:
            all_names = options.names.split(",")
            all_tests_keys = all_tests.keys()
            for name in all_names:
                if name in all_tests_keys:
                    tests[name] = all_tests[name]
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (name)
        else:
            tests = all_tests
        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                print "[ERROR] You must specify a build path"
                sys.exit(1)
            base_source_paths = options.source_dir
            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']
            # NOTE(review): options.mcu/options.tool/options.options/
            # options.clean come from get_default_options_parser().
            target = TARGET_MAP[options.mcu]
            build_report = {}
            build_properties = {}
            library_build_success = True
            try:
                # Build sources
                build_library(base_source_paths, options.build_dir, target, options.tool,
                              options=options.options,
                              jobs=options.jobs,
                              clean=options.clean,
                              report=build_report,
                              properties=build_properties,
                              name="mbed-build",
                              macros=options.macros,
                              verbose=options.verbose,
                              archive=False)
            except Exception, e:
                # Library failure suppresses the test builds below but the
                # report/exit-status path still runs.
                library_build_success = False
                print "Failed to build library"
            if library_build_success:
                # Build all the tests
                test_build_success, test_build = build_tests(tests, [options.build_dir], options.build_dir, target, options.tool,
                        options=options.options,
                        clean=options.clean,
                        report=build_report,
                        properties=build_properties,
                        macros=options.macros,
                        verbose=options.verbose,
                        jobs=options.jobs,
                        continue_on_build_fail=options.continue_on_build_fail)
                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    test_spec_data = test_spec_from_test_builds(test_build)
                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)
                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError, e:
                        print "[ERROR] Error writing test spec to file"
                        print e
                # If a path to a JUnit build report spec is provided, write it to a file
                if options.build_report_junit:
                    report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
                    report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)
                # Exit status mirrors the printed build report outcome.
                print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
                status = print_report_exporter.report(build_report)
                if status:
                    sys.exit(0)
                else:
                    sys.exit(1)
    except KeyboardInterrupt, e:
        print "\n[CTRL+c] exit"
    except Exception,e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print "[ERROR] %s" % str(e)
        sys.exit(1)
| |
import pexpect
import argparse
# Modules used on both ends: s_imports is exec'd here and also shipped to
# the remote python REPL by runRemotePy() below (Python 2 module names).
s_imports="""
import os
import signal
import datetime
import SocketServer
import socket
import threading
import Queue
import sys
import time
import subprocess
import re
"""
exec s_imports
# -tp targetPort -sh sshHost -lp listenPort [-su sshUser] [-sp sshPort] [-sw sshPassword] [--force] [--kill]
argParser = argparse.ArgumentParser()
argParser.add_argument('-tp', action='store', required=True, type = int)
argParser.add_argument('-sh', action='store', required=True)
argParser.add_argument('-lp', action='store', required=True, type = int)
argParser.add_argument('-su', action='store', required=True, default=None)
argParser.add_argument('-sp', action='store', required=False, type = int, default=22)
argParser.add_argument('-sw', action='store', required=False, default=None)
argParser.add_argument('-di', action='store', required=False, default="in")
argParser.add_argument('--force', action='store_true', required=False, default=False)
argParser.add_argument('--kill', action='store_true', required=False, default=False)
argParser.add_argument('--killall', action='store_true', required=False, default=False)
args = argParser.parse_args()
# %-template filled in connectSshPy(): user, host, port, remote forwarded
# port, local target port; ends in `python` to open a remote REPL.
ssh_cmd = "ssh %s@%s -p %d -t -R %d:localhost:%d python"
ssh_user = args.su
ssh_host = args.sh
ssh_port = args.sp
ssh_password = args.sw
# Where the last connection goes: the real target outside this app.
final_port = args.tp
final_host = 'localhost'
# Where the first connection is accepted: the start of the connection chain.
final_listen_port = args.lp
force = args.force
kill = args.kill
killall = args.killall
# Prompts matched by pexpect against the remote session.
pyprompt = ">>>"
pwprompt = "password:"
serverEventQueue = Queue.Queue()
def listProcess():
    """Return a snapshot of every process visible under /proc.

    Each entry is a dict with keys: pid (int), cmd (argv list), qcmd
    (shell-quoted argv string), cwd, exe, starttimes (epoch seconds as
    float) and starttimestr.  Processes that disappear while being read
    are skipped.  Raises Exception if /proc/stat has no btime line.

    Fixes: file handles are now closed (with-blocks), the stat read is
    inside the race guard, and parsing no longer breaks when the comm
    field contains spaces.
    """
    hz = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
    # Boot time (epoch seconds) converts per-process start ticks-since-boot
    # into absolute timestamps.
    btime = None
    with open('/proc/stat') as statfp:
        for line in statfp:
            fields = line.split()
            if fields and fields[0] == 'btime':
                btime = fields[1]
                break
    if btime is None:
        raise Exception('Can\'t get boot time')
    procs = []
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        try:
            # Text mode works on Python 2 and 3; cmdline is NUL-separated
            # with a trailing NUL, hence the [:-1].
            with open(os.path.join('/proc', pid, 'cmdline')) as fp:
                cmd = fp.read().split("\x00")[:-1]
            exe = os.readlink(os.path.join('/proc', pid, 'exe'))
            cwd = os.readlink(os.path.join('/proc', pid, 'cwd'))
            with open(os.path.join('/proc', pid, 'stat')) as fp:
                stat_data = fp.read()
        except (IOError, OSError):
            # proc has already terminated
            continue
        qcmd = " ".join(
            "\"" + arg.replace("\"", "\\\"") + "\"" for arg in cmd)
        # comm (field 2) may contain spaces, so split only after the
        # closing ')'; starttime is overall field 22, i.e. index 19 of
        # the remainder.
        ticks = stat_data.rsplit(')', 1)[1].split()[19]
        starttime = float(btime) + (float(ticks) / hz)
        procs.append({'pid': int(pid), 'cmd': cmd, 'qcmd': qcmd,
                      'cwd': cwd, 'starttimes': starttime,
                      'starttimestr':
                          str(datetime.datetime.fromtimestamp(starttime)),
                      'exe': exe})
    return procs
def killProc(pid):
    """Send SIGTERM to *pid*; a process that is already gone is fine."""
    try:
        os.kill(pid, signal.SIGTERM)
    except Exception as exc:
        msg = str(exc)
        log(" Error: " + msg)
        # An already-exited target achieved what we wanted; anything
        # else (e.g. permission denied) propagates.
        if 'No such process' not in msg:
            raise exc
def ensureUnique(force = False, kill = False, killall = False):
    """Ensure at most one tunnel process runs for this script/arguments.

    Scans /proc for siblings running the same interpreter and script.
    killall: kill every sibling regardless of arguments.
    force/kill: kill siblings whose relevant arguments match ours.
    Otherwise the younger duplicate (by start time, then pid) exits.
    """
    def relArgs(cmd, cwd=os.getcwd()):
        # Extract only the arguments relevant for duplicate detection
        # (-sh/-di/-lp plus the resolved script path).
        _di = "in";
        _sh = None;
        _lp = None;
        _script = None;
        if ("python" in cmd[0]):
            _script = cmd[1]
        if (_script != None and not _script.startswith("/")):
            # Resolve relative script paths against that process's cwd.
            _script = os.path.normpath(os.path.join(cwd, _script))
        for i in range(1,len(cmd)):
            if cmd[i-1] == '-sh':
                _sh = cmd[i]
            elif cmd[i-1] == '-di':
                _di = cmd[i]
            elif cmd[i-1] == '-lp':
                _lp = cmd[i]
        if (_di == "in"):
            return {"script":_script, "di":_di, "sh":_sh, "lp":_lp}
        else:
            # Outbound tunnels always target localhost, so normalize sh.
            return {"script":_script, "di":_di, "sh":"localhost", "lp":_lp}
    procs = listProcess()
    self = None
    for p in procs:
        if (p['pid'] == os.getpid()):
            self = p
    #debug("me " + repr(self))
    _args = relArgs(self['cmd'])
    debug("margs = %s" % _args)
    for p in procs:
        #debug("p " + repr(p))
        if (p['exe'] != self['exe']):
            continue
        if (p['pid'] == os.getpid()):
            continue
        _pargs = relArgs(p['cmd'], p['cwd'])
        debug("pargs = %s" % _pargs)
        if (_pargs['script'] != _args['script']):
            continue
        if (killall):
            log("killing sibiling process: %d" % p['pid'])
            killProc(p['pid'])
            continue
        if (_pargs==_args):
            debug("sibiling " + repr(p))
            if (force or kill):
                log("killing sibiling process: %d" % p['pid'])
                killProc(p['pid'])
                continue
            # Deterministic tie-break: the later-started (or higher-pid)
            # duplicate is the one that exits.
            x = self['starttimes'] - p['starttimes']
            if (x == 0):
                x = self['pid'] - p['pid']
            if (x > 0):
                log("This is redundant process %d, exit" % os.getpid())
                exit(1)
def log(s):
    # Timestamped, pid-tagged log line, flushed so it survives piping.
    print "%s %d LOG: %s" %(str(datetime.datetime.now()), os.getpid(), s)
    sys.stdout.flush()
def debug(s):
    # Verbose diagnostic output; same format as log() with DEBUG tag.
    print "%s %d DEBUG: %s" %(str(datetime.datetime.now()), os.getpid(), s)
    sys.stdout.flush()
def debugServer(s):
    # Local variant of the server-side trace hook (s_debugServer below
    # carries the no-op version that runs remotely).
    print "%s %d DEBUG SERVER: %s" %(str(datetime.datetime.now()), os.getpid(), s)
    sys.stdout.flush()
def debugServerXFer(s):
    # Per-chunk transfer tracing is disabled; only the flush remains.
    #print "%s %d DEBUG SERVER: %s" %(str(datetime.datetime.now()), os.getpid(), s)
    sys.stdout.flush()
# Remote-side replacements for the two debug hooks above: tracing is
# compiled out on the far end (the commented file-append variant can be
# re-enabled for troubleshooting).  The string is code shipped verbatim
# by runRemotePy(), so its contents must not be edited casually.
s_debugServer="""
def debugServer(s):
#	with open('log', 'a') as f:
#		f.write( str(s) + '''
#''')
	pass
def debugServerXFer(s):
#	with open('log', 'a') as f:
#		f.write( str(s) + '''
#''')
	pass
"""
# Port-forwarding server, exec'd locally below and also shipped to the
# remote host: each accepted connection is bridged to *target* by a pair
# of copy threads, one per direction.
s_startServer="""
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    daemon_threads = True
    allow_reuse_address = True
def startServer(target, serverPort):
    class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            def socketCopy(src, dst, dir):
                dir = threading.current_thread().name
                try:
                    while True:
                        data = src.recv(1024*32)
                        debugServerXFer("%s: %s" % (dir, repr(data)))
                        if not data:
                            debugServerXFer("%s: %s" % (dir, "EOF, closing"))
                            break
                        dst.sendall(data)
                except Exception as e:
                    debugServer("%s: %s" % (dir, "Error: " + repr(e)))
                finally:
                    dst.shutdown(socket.SHUT_WR)
                    dst.close()
            c1 = self.request
            c1.settimeout(240)
            try:
                c2 = socket.socket()
                debugServer("Connecting to " + repr(target))
                c2.connect(target)
                c2.settimeout(240)
                try:
                    debugServer(" Connected OK")
                    t21 = threading.Thread(group=None, target=socketCopy, name="cp-"+repr(c2.getpeername())+">>"+repr(c1.getpeername()), args=(c2, c1, '>>'))
                    t12 = threading.Thread(group=None, target=socketCopy, name="cp-"+repr(c2.getpeername())+"<<"+repr(c1.getpeername()), args=(c1, c2, '<<'))
                    t12.daemon = True
                    t12.start()
                    t21.daemon = True
                    t21.start()
                    t12.join()
                    debugServer("Thread " + t12.name + " finnished")
                    t21.join()
                    debugServer("Thread " + t21.name + " finnished")
                finally:
                    c2.close()
            finally:
                c1.close()
    server = ThreadedTCPServer(("0.0.0.0", serverPort), ThreadedTCPRequestHandler)
    ip, port = server.server_address
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    return server
"""
exec s_startServer
def connectSshPy(host, ssh_port, user, ssh_password, tunnelPortL, tunnelPortR):
    """Spawn ssh with a reverse tunnel and a remote interactive python.

    Remote port tunnelPortL is forwarded back to local tunnelPortR.
    Waits for the python prompt, answering a password prompt when
    ssh_password was supplied.  Returns the pexpect session positioned
    at the prompt; raises Exception on unknown host or timeout.
    """
    cmd = ssh_cmd % (user, host, ssh_port, tunnelPortL, tunnelPortR)
    debug( " -- running cmd " + cmd)
    ssh = pexpect.spawn (cmd)
    ssh.logfile=sys.stderr
    i=ssh.expect([pyprompt, pwprompt, "Name or service not known", pexpect.TIMEOUT])
    ssh.logfile=None
    # Fix: identity comparison with None (`is not None`) per PEP 8.
    if (i == 1 and ssh_password is not None):
        ssh.send(ssh_password + "\n")
        ssh.expect(pyprompt)
    elif (i >= 2):
        # Fix: on TIMEOUT ssh.match is the TIMEOUT class, so it must be
        # str()'d -- concatenating it raw raised TypeError and masked
        # the real error.
        raise Exception(ssh.before + str(ssh.match))
    return ssh
def runRemotePy(ssh, script):
    """Ship *script* to the remote python REPL over *ssh* and exec it.

    The script is sent as a triple-quoted assignment; embedded triple
    quotes are spliced so the remote literal stays valid.  Returns the
    remote output that preceded the prompt, stripped.
    """
    debug("@@@ " + script.strip()[:40] + "...")
    # Unique separator proves the whole literal was echoed back before
    # we try to exec it.
    sep = "'--8<--- "+repr(datetime.datetime.now())+"'"
    ssh.send('script="""\n')
    ssh.send(script.replace('"""', '""" + ' + '\'"""\'' + ' + """') + "\n")
    ssh.send('""" # ' + sep + '\n')
    ssh.expect_exact(sep)
    ssh.expect_exact(pyprompt)
    ssh.send('exec script\n')
    ssh.expect_exact('exec script')
    ssh.expect_exact(pyprompt)
    debug ("Executed remote script")
    return ssh.before.strip()
# Watchdog tuning, exec'd locally AND shipped to the remote side by
# runRemotePy() so both ends agree on interval values (seconds).
s_wdogConfig = """
wdogTimeoutInterval = 120
wdogTimeoutCheckInterval = wdogTimeoutInterval / 2
wdogTimeoutResetInterval = wdogTimeoutInterval / 2
"""
exec s_wdogConfig
# Watchdog code executed on the remote side.  Bug fix: wdogReset() used
# to bind wdogTimeout as a function local, so the module-level deadline
# stayed None and the check never compared against the real reset time;
# it now declares the name global.
# NOTE(review): the Timer fires only once and exit(1) inside a worker
# thread only ends that thread, so this watchdog is best-effort at most
# -- confirm whether a periodic re-arm / os._exit was intended.
s_wdogRemote = """
wdogTimeout = None
def wdogReset():
    global wdogTimeout
    wdogTimeout = datetime.datetime.now() + datetime.timedelta(seconds=wdogTimeoutInterval)
def wdogTimeoutCheck():
    if (datetime.datetime.now() > wdogTimeout):
        debugServer("WATCHDOG TIMEOUT")
        exit(1)
wdogTimer = threading.Timer(wdogTimeoutCheckInterval, wdogTimeoutCheck)
wdogTimer.start()
wdogReset()
"""
s_killPortThief="""
def killPortThief(port):
#Proto Recv-Q Send-Q Local Address Foreign Address State User Inode PID/Program name
#tcp 0 0 0.0.0.0:3422 0.0.0.0:* LISTEN 22622/python
uid = os.getuid()
repeat = True
while repeat:
repeat = False
out = subprocess.Popen(["netstat", "-l", "-n", "-t", "-e", "-p"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].splitlines()
while True:
h = out.pop(0)
if h.find("Local Address") >= 0:
break
#debugServer(h)
hUser = re.search("User\\s*", h)
hProc = re.search("PID\/Program name\\s*", h)
hLAddr = re.search("Local Address\\s*", h)
for l in out:
#debugServer(l)
_uid = int(l[hUser.start():hUser.end()].strip())
proc = l[hProc.start():hProc.end()].strip()
laddr = l[hLAddr.start():hLAddr.end()].strip()
if (laddr.endswith(":%d" % port)):
debugServer("user %s, process %s, laddr %s " % (uid, proc, laddr))
if (uid == _uid):
debugServer("local port %d is in use by process %s, killing it..." % (port, proc))
os.kill(int(proc.split('/')[0]), signal.SIGTERM)
time.sleep(1)
repeat = True
else:
debugServer("local port %d is in use by process %s. It is owned by another user, aborting." % (port, proc))
exit(1)
"""
def sigterm_handler(_signo, _stack_frame):
    # Translate SIGTERM into SystemExit(2) so finally blocks still run.
    exit(2)
# Main loop: keep the local forwarding server + remote tunnel alive,
# rebuilding the whole chain after any failure (10s backoff).
try:
    signal.signal(signal.SIGTERM, sigterm_handler)
    ensureUnique(force, kill, killall)
    if (kill or killall):
        exit(0)
    log("Starting up, pid=%d" % (os.getpid(),))
    while True:
        try:
            # Port 0 lets the OS pick a free local port for the server.
            server = startServer((final_host, final_port), 0)
            log( "- Server listening on %s:%d" % server.server_address)
            try:
                log( "- Connecting to remote host")
                ssh = connectSshPy(ssh_host, ssh_port, ssh_user, ssh_password, server.server_address[1], server.server_address[1])
                log( "- Connected, ssh PID = %d" % ssh.pid)
                try:
                    # Push config + helpers into the remote python REPL.
                    runRemotePy(ssh, s_wdogConfig)
                    runRemotePy(ssh, s_wdogRemote)
                    #ssh.logfile=sys.stderr
                    # Transmit and run code on remote host
                    runRemotePy(ssh, s_imports)
                    runRemotePy(ssh, s_debugServer)
                    runRemotePy(ssh, s_startServer)
                    runRemotePy(ssh, "wdogReset()")
                    # check port free on remote host, kill process that uses it
                    runRemotePy(ssh, s_killPortThief)
                    ssh.logfile=sys.stderr
                    runRemotePy(ssh, "killPortThief(%d)" % final_listen_port)
                    ssh.logfile=None
                    runRemotePy(ssh, "wdogReset()")
                    log( runRemotePy(ssh, "startServer(('localhost', %d), %d)" % (server.server_address[1],final_listen_port) ))
                    # Expect later output and terminate
                    #ssh.interact()
                    ssh.logfile=sys.stderr
                    # Keep the session alive by feeding the remote watchdog.
                    while True:
                        runRemotePy(ssh, "wdogReset()")
                        time.sleep(wdogTimeoutResetInterval)
                    #ssh.expect (pexpect.EOF, timeout=60*60*24)
                finally:
                    try:
                        ssh.close(force=True)
                    except:
                        pass
            finally:
                try:
                    # NOTE(review): ThreadedTCPServer has no close();
                    # this likely raises AttributeError which the bare
                    # except swallows -- confirm shutdown()/server_close()
                    # was intended.
                    server.close()
                except:
                    pass
        except KeyboardInterrupt:
            log( "aborted process %d" % os.getpid())
            break
        except Exception as e:
            log(repr(e))
            time.sleep(10)
finally:
    pass
| |
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# Originally contributed by Check Point Software Technologies, Ltd.
import logging
import os
import subprocess
import time
import shutil
import shlex
from lib.cuckoo.common.abstracts import Machinery
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.core.resultserver import ResultServer
log = logging.getLogger(__name__)
class Avd(Machinery):
    """Virtualization layer for the Android Emulator (AVD).

    Machines are created on demand by cloning a configured reference AVD
    image, booted through the ``emulator`` binary and driven over ``adb``.
    """

    def _initialize_check(self):
        """Runs all checks when a machine manager is initialized.
        @raise CuckooCriticalError: if any configured tool path or the
            reference machine image is missing or invalid.
        """
        # Maps machine label -> Popen handle of its emulator process.
        self.emulator_processes = {}

        self._check_path_option("emulator_path", "emulator")
        self._check_path_option("adb_path", "adb")
        self._check_path_option("avd_path", "avd")

        if not self.options.avd.reference_machine:
            raise CuckooCriticalError("reference machine path missing, "
                                      "please add it to the config file")

        machine_path = os.path.join(self.options.avd.avd_path,
                                    self.options.avd.reference_machine)
        # A valid AVD consists of a <name>.avd directory plus a <name>.ini file.
        if not os.path.exists("%s.avd" % machine_path) or \
                not os.path.exists("%s.ini" % machine_path):
            raise CuckooCriticalError("reference machine not found at "
                                      "specified path \"%s\"" % machine_path)

    def _check_path_option(self, option, name):
        """Ensure config option `option` is set and points to an existing path.
        @param option: attribute name in the avd config section.
        @param name: human readable name used in error messages.
        @raise CuckooCriticalError: if the option is unset or the path is missing.
        """
        path = getattr(self.options.avd, option)
        if not path:
            raise CuckooCriticalError("%s path missing, "
                                      "please add it to the config file" % name)
        if not os.path.exists(path):
            raise CuckooCriticalError("%s not found at "
                                      "specified path \"%s\"" % (name, path))

    def start(self, label, task):
        """Start a virtual machine.
        @param label: virtual machine name.
        @param task: task object.
        @raise CuckooMachineError: if unable to start.
        """
        log.debug("Starting vm %s", label)
        self.duplicate_reference_machine(label)
        self.start_emulator(label, task)
        self.port_forward(label)
        self.start_agent(label)

    def stop(self, label):
        """Stops a virtual machine.
        @param label: virtual machine name.
        @raise CuckooMachineError: if unable to stop.
        """
        log.debug("Stopping vm %s", label)
        self.stop_emulator(label)

    def _list(self):
        """Lists virtual machines installed.
        @return: virtual machine names list.
        """
        return self.options.avd.machines

    def _status(self, label):
        """Gets current status of a vm.
        @param label: virtual machine name.
        @return: status string.
        """
        log.debug("Getting status for %s", label)
        # NOTE(review): no status is actually computed here; the method
        # implicitly returns None. Left unchanged to preserve behavior.

    def duplicate_reference_machine(self, label):
        """Creates a new emulator based on a reference one.
        @param label: name of the clone to create.
        """
        reference_machine = self.options.avd.reference_machine
        log.debug("Duplicate Reference Machine '%s'.", reference_machine)

        # Clean/delete any leftover clone with the same name.
        self.delete_old_emulator(label)

        avd_config_file = os.path.join(self.options.avd.avd_path, reference_machine+".ini")
        new_config_file = os.path.join(self.options.avd.avd_path, label+".ini")
        reference_avd_path = os.path.join(self.options.avd.avd_path, reference_machine+".avd/")
        new_avd_path = os.path.join(self.options.avd.avd_path, label+".avd/")
        hw_qemu_config_file = os.path.join(new_avd_path, "hardware-qemu.ini")

        # First we copy the template .ini.
        log.debug("Copy AVD reference config file '%s' in '%s'...",
                  avd_config_file, new_config_file)
        shutil.copyfile(avd_config_file, new_config_file)

        # Copy the internal files of the reference avd. The command is passed
        # as a list (not a formatted string) so paths containing spaces are
        # not mangled by shlex splitting.
        log.debug("Duplicate the AVD internal content from '%s' in '%s'...",
                  reference_avd_path, new_avd_path)
        OSCommand.executeCommand(["cp", "-R", reference_avd_path, new_avd_path])

        # Then adapt the content of the copied files so the clone refers to
        # its own name/paths instead of the reference machine's.
        self.replace_content_in_file(new_config_file, reference_machine, label)
        self.replace_content_in_file(hw_qemu_config_file, reference_machine, label)

    def delete_old_emulator(self, label):
        """Deletes any trace of an emulator that would have the same name as
        the one of the current emulator."""
        old_emulator_config_file = os.path.join(self.options.avd.avd_path,
                                                "%s.ini" % label)
        if os.path.exists(old_emulator_config_file):
            log.debug("Deleting old emulator config file '%s'", old_emulator_config_file)
            os.remove(old_emulator_config_file)

        old_emulator_path = os.path.join(self.options.avd.avd_path, label+".avd/")
        if os.path.isdir(old_emulator_path):
            log.debug("Deleting old emulator FS '%s'", old_emulator_path)
            shutil.rmtree(old_emulator_path)

    def replace_content_in_file(self, fileName, contentToReplace, replacementContent):
        """Replaces the specified motif by a specified value in the specified
        file.
        @param fileName: file to rewrite in place.
        @param contentToReplace: substring to search for.
        @param replacementContent: substring to substitute.
        """
        log.debug("Replacing '%s' with '%s' in '%s'",
                  contentToReplace, replacementContent, fileName)
        with open(fileName, 'r') as fd:
            lines = fd.readlines()
        newLines = [
            line.replace(contentToReplace, replacementContent) for line in lines
        ]
        with open(fileName, 'w') as fd:
            fd.writelines(newLines)

    def start_emulator(self, label, task):
        """Starts the emulator process for machine `label`."""
        emulator_port = self.options.get(label)["emulator_port"]
        cmd = [
            self.options.avd.emulator_path,
            "@%s" % label,
            "-no-snapshot-save",
            "-netspeed",
            "full",
            "-netdelay",
            "none",
            "-port",
            "%s" % emulator_port,
            "-tcpdump",
            self.pcap_path(task.id),
        ]

        # In headless mode we remove the skin, audio, and window support.
        if self.options.avd.mode == "headless":
            cmd += ["-no-skin", "-no-audio", "-no-window"]

        # If a proxy address has been provided for this analysis, then we have
        # to pass the proxy address along to the emulator command. The
        # mitmproxy instance is not located at the resultserver's IP address
        # though, so we manually replace the IP address by localhost.
        if "proxy" in task.options:
            _, port = task.options["proxy"].split(":")
            cmd += ["-http-proxy", "http://127.0.0.1:%s" % port]

        self.emulator_processes[label] = OSCommand.executeAsyncCommand(cmd)
        # Give the emulator process a head start before talking to it.
        time.sleep(10)
        # adb occasionally fails to register freshly started emulators;
        # restarting its server forces a device re-enumeration.
        self.restart_adb_server()
        # Waits for device to be ready.
        self.wait_for_device_ready(label)

    def stop_emulator(self, label):
        """Stop the emulator."""
        emulator_port = str(self.options.get(label)["emulator_port"])
        log.info("Stopping AVD listening on port %s", emulator_port)

        # Ask the emulator console (through adb) to shut the AVD down cleanly.
        cmd = [
            self.options.avd.adb_path,
            "-s", "emulator-%s" % emulator_port,
            "emu", "kill",
        ]
        OSCommand.executeCommand(cmd)
        time.sleep(1)

        # Fall back to killing the process handle we kept at start time.
        if label in self.emulator_processes:
            try:
                self.emulator_processes[label].kill()
            except Exception as e:
                log.warning(e)
            del self.emulator_processes[label]

    def wait_for_device_ready(self, label):
        """Analyzes the emulator and returns when it's ready."""
        emulator_port = str(self.options.get(label)["emulator_port"])
        adb = self.options.avd.adb_path
        device = "emulator-%s" % emulator_port

        log.debug("Waiting for device %s to be ready.", device)
        OSCommand.executeCommand([adb, "-s", device, "wait-for-device"])

        log.debug("Waiting for the emulator to be ready")
        # Each property below flips to its final value at a successive stage
        # of the Android boot sequence.
        log.debug(" - (dev.bootcomplete)")
        self._wait_for_property(adb, device, "dev.bootcomplete", "1")
        log.debug("- (sys_bootcomplete)")
        self._wait_for_property(adb, device, "sys.boot_completed", "1")
        log.debug(" - (init.svc.bootanim)")
        self._wait_for_property(adb, device, "init.svc.bootanim", "stopped")

        # Grace period so services started right after boot settle down.
        time.sleep(5)
        log.debug("Emulator %s is ready !", device)

    def _wait_for_property(self, adb, device, prop, expected):
        """Poll an Android system property until it equals `expected`.
        @param adb: path to the adb binary.
        @param device: adb device serial (e.g. "emulator-5554").
        @param prop: system property name passed to `getprop`.
        @param expected: stripped output value that ends the wait.
        """
        cmd = [adb, "-s", device, "shell", "getprop", prop]
        while True:
            result = OSCommand.executeCommand(cmd)
            # executeCommand() returns None when the adb invocation fails;
            # keep polling in that case as well.
            if result is not None and result.strip() == expected:
                return
            time.sleep(1)

    def port_forward(self, label):
        """Forward the agent port (8000) from the host to the emulator."""
        cmd = [
            self.options.avd.adb_path,
            "-s", "emulator-%s" % self.options.get(label)["emulator_port"],
            "forward", "tcp:8000", "tcp:8000",
        ]
        OSCommand.executeAsyncCommand(cmd)

    def start_agent(self, label):
        """Launch the Cuckoo agent inside the emulator."""
        cmd = [
            self.options.avd.adb_path,
            "-s", "emulator-%s" % self.options.get(label)["emulator_port"],
            "shell", "/data/local/agent.sh",
        ]
        OSCommand.executeAsyncCommand(cmd)
        # Sleep 10 seconds to allow the agent to startup properly
        time.sleep(10)

    def check_adb_recognize_emulator(self, label):
        """Checks that ADB recognizes the emulator. Returns True if device is
        recognized by ADB, False otherwise.
        """
        log.debug("Checking if ADB recognizes emulator...")
        output = OSCommand.executeCommand([self.options.avd.adb_path, "devices"])
        emu = "emulator-%s" % self.options.get(label)["emulator_port"]
        # Guard against None: executeCommand() returns None when `adb devices`
        # itself failed, which previously raised a TypeError here.
        if output and emu in output:
            log.debug("Emulator has been found!")
            return True
        log.debug("Emulator has not been found.")
        return False

    def restart_adb_server(self):
        """Restarts ADB server. This function is not used because we have to
        verify we don't have multiple devices.
        """
        log.debug("Restarting ADB server...")
        OSCommand.executeCommand([self.options.avd.adb_path, "kill-server"])
        log.debug("ADB server has been killed.")
        OSCommand.executeCommand([self.options.avd.adb_path, "start-server"])
        log.debug("ADB server has been restarted.")

    def get_task_id(self, label):
        """Return the id of the task assigned to machine `label`.
        @return: task id, or None when no task runs on that machine.
        """
        analysistasks = ResultServer().analysistasks
        for task_ip in analysistasks:
            # Compare labels by equality: the original `is` tested object
            # identity and could spuriously fail for equal strings.
            if analysistasks[task_ip][1].label == label:
                return analysistasks[task_ip][0].id
        return None
class OSCommand(object):
    """Tool class that provides common methods to execute commands on the OS."""

    @staticmethod
    def executeAsyncCommand(commandAndArgs):
        """Spawn the command without waiting and return its Popen handle."""
        return subprocess.Popen(
            commandAndArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

    @staticmethod
    def executeCommand(commandAndArgs):
        """Run a command to completion and return its combined stdout/stderr,
        or None when the command cannot be executed or exits non-zero."""
        if isinstance(commandAndArgs, str):
            args = shlex.split(commandAndArgs)
        else:
            args = commandAndArgs
        try:
            return subprocess.check_output(args, stderr=subprocess.STDOUT)
        except Exception:
            # Deliberate best-effort contract: callers treat None as failure.
            return None
| |
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.changes.change_collection import ChangeCollection
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.comments.comment_collection import CommentCollection
from office365.sharepoint.fields.field_lookup_value import FieldLookupValue
from office365.sharepoint.fields.fieldMultiLookupValue import FieldMultiLookupValue
from office365.sharepoint.likes.liked_by_information import LikedByInformation
from office365.sharepoint.listitems.form_update_value import ListItemFormUpdateValue
from office365.sharepoint.listitems.list_item_version import ListItemVersion
from office365.sharepoint.permissions.securable_object import SecurableObject
from office365.sharepoint.reputationmodel.reputation import Reputation
from office365.sharepoint.sharing.externalSharingSiteOption import ExternalSharingSiteOption
from office365.sharepoint.sharing.object_sharing_information import ObjectSharingInformation
from office365.sharepoint.sharing.sharing_result import SharingResult
from office365.sharepoint.taxonomy.taxonomy_field_value import TaxonomyFieldValueCollection
from office365.sharepoint.ui.applicationpages.client_people_picker import (
ClientPeoplePickerWebServiceInterface, ClientPeoplePickerQueryParameters
)
class ListItem(SecurableObject):
    """An individual entry within a SharePoint list. Each list item has a schema that maps to fields in the list
    that contains the item, depending on the content type of the item."""

    def __init__(self, context, resource_path=None, parent_list=None):
        """
        :type context: office365.sharepoint.client_context.ClientContext
        :type resource_path: office365.runtime.client_path.ClientPath or None
        :type parent_list: office365.sharepoint.lists.list.List or None
        """
        super(ListItem, self).__init__(context, resource_path)
        if parent_list is not None:
            # Cache the parent list locally only (persist_changes=False): it
            # must not be written back to the server on update().
            self.set_property("ParentList", parent_list, False)

    def set_rating(self, value):
        """
        Rates an item within the specified list. The return value is the average rating for the specified list item.

        :param int value: An integer value for the rating to be submitted.
        The rating value SHOULD be between 1 and 5; otherwise, the server SHOULD return an exception.
        """
        return_value = ClientResult(self.context)

        def _list_item_loaded():
            # Deferred until "Id"/"ParentList" are loaded on the parent list.
            Reputation.set_rating(self.context, self.parent_list.id, self.id, value, return_value)
        self.parent_list.ensure_properties(["Id", "ParentList"], _list_item_loaded)
        return return_value

    def set_like(self, value):
        """
        Sets or unsets the like quality for the current user for an item within
        the specified list. The return value is the total number of likes for the specified list item.

        :param bool value: A Boolean value that indicates the operation being either like or unlike.
        A True value indicates like.
        """
        return_value = ClientResult(self.context)

        def _list_item_loaded():
            # Deferred until "Id"/"ParentList" are loaded on the parent list.
            Reputation.set_like(self.context, self.parent_list.id, self.id, value, return_value)
        self.parent_list.ensure_properties(["Id", "ParentList"], _list_item_loaded)
        return return_value

    def get_wopi_frame_url(self, action):
        """
        Gets the full URL to the SharePoint frame page that initiates the SPWOPIAction object with the WOPI
        application associated with the list item.

        :param int action: Indicates which user action is indicated in the returned WOPIFrameUrl.
        """
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "GetWOPIFrameUrl", [action], None, None, result)
        self.context.add_query(qry)
        return result

    def recycle(self):
        """Moves the listItem to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "Recycle", None, None, None, result)
        self.context.add_query(qry)
        return result

    def get_changes(self, query=None):
        """Returns the collection of changes from the change log that have occurred within the ListItem,
        based on the specified query.

        :param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return;
            defaults to an item-scoped query when omitted.
        """
        if query is None:
            query = ChangeQuery(item=True)
        changes = ChangeCollection(self.context)
        qry = ServiceOperationQuery(self, "getChanges", None, query, "query", changes)
        self.context.add_query(qry)
        return changes

    def share(self, user_principal_name,
              share_option=ExternalSharingSiteOption.View,
              send_email=True, email_subject=None, email_body=None):
        """
        Share a ListItem (file or folder facet)

        :param str user_principal_name: User identifier
        :param ExternalSharingSiteOption share_option: The sharing type of permission to grant on the object.
        :param bool send_email: A flag to determine if an email notification SHOULD be sent (if email is configured).
        :param str email_subject: The email subject.
        :param str email_body: The email subject.
        :rtype: SharingResult
        """
        result = ClientResult(self.context, SharingResult(self.context))
        file_result = ClientResult(self.context)
        # Maps the public sharing option onto SharePoint's internal role ids.
        role_values = {
            ExternalSharingSiteOption.View: "role:1073741826",
            ExternalSharingSiteOption.Edit: "role:1073741827",
        }

        def _property_resolved():
            # Runs once the item's absolute URL has been loaded.
            file_result.value = self.get_property("EncodedAbsUrl")

        def _picker_value_resolved(picker_value):
            # Runs once the people picker has resolved the principal name.
            from office365.sharepoint.webs.web import Web
            result.value = Web.share_object(self.context, file_result.value, picker_value, role_values[share_option],
                                            0,
                                            False, send_email, False, email_subject, email_body)
        self.ensure_property("EncodedAbsUrl", _property_resolved)
        params = ClientPeoplePickerQueryParameters(user_principal_name)
        ClientPeoplePickerWebServiceInterface.client_people_picker_resolve_user(self.context,
                                                                               params, _picker_value_resolved)
        return result.value

    def unshare(self):
        """
        Unshare a ListItem (file or folder facet)

        :rtype: SharingResult
        """
        result = ClientResult(self.context, SharingResult(self.context))

        def _property_resolved():
            # Runs once the item's absolute URL has been loaded.
            abs_url = self.get_property("EncodedAbsUrl")
            from office365.sharepoint.webs.web import Web
            result.value = Web.unshare_object(self.context, abs_url)
        self.ensure_property("EncodedAbsUrl", _property_resolved)
        return result.value

    def get_sharing_information(self):
        """
        Retrieves information about the sharing state for a given list item.
        """
        return_type = ObjectSharingInformation(self.context)

        def _item_resolved():
            # Deferred until both the item id and its parent list are known.
            ObjectSharingInformation.get_list_item_sharing_information(
                self.context, self.parent_list.properties["Id"], self.properties["Id"], return_type=return_type)
        self.ensure_properties(["Id", "ParentList"], _item_resolved)
        return return_type

    def validate_update_list_item(self, form_values, new_document_update=False, checkin_comment=None):
        """Validates and sets the values of the specified collection of fields for the list item.

        :param dict form_values: Specifies a collection of field internal names and values for the given field
        :param bool new_document_update: Specifies whether the list item is a document being updated after upload.
        :param str checkin_comment: Check-in comment, if any. This parameter is only applicable when the list item
             is checked out.
        """
        normalized_form_values = [ListItemFormUpdateValue(k, v) for k, v in form_values.items()]
        payload = {
            "formValues": normalized_form_values,
            "bNewDocumentUpdate": new_document_update,
            "checkInComment": checkin_comment,
            "datesInUTC": True
        }
        result = ClientResult(self.context, ClientValueCollection(ListItemFormUpdateValue))
        qry = ServiceOperationQuery(self, "ValidateUpdateListItem", None, payload, None, result)
        self.context.add_query(qry)
        return result

    def update(self):
        """
        Updates the item without creating another version of the item.

        Exceptions:
        - 2130575305 Microsoft.SharePoint.SPException List item was modified on the server in a way that prevents
           changes from being committed, as determined by the protocol server.
        -1 System.InvalidOperationException List does not support this operation.
        """
        # The entity type annotation must be resolved from the parent list
        # before the update payload can be serialized.
        self.ensure_type_name(self.parent_list)
        super(ListItem, self).update()
        return self

    def system_update(self):
        """Update the list item (server-side "system" update that does not
        change modified/editor metadata)."""
        qry = ServiceOperationQuery(self, "SystemUpdate")
        self.context.add_query(qry)
        return self

    def update_overwrite_version(self):
        """Updates the item without creating another version of the item."""
        qry = ServiceOperationQuery(self, "UpdateOverwriteVersion")
        self.context.add_query(qry)
        return self

    def set_comments_disabled(self, value):
        """
        Sets the value of CommentsDisabled (section 3.2.5.87.1.1.8) for the item.

        :type value: bool
        """
        qry = ServiceOperationQuery(self, "SetCommentsDisabled", [value])
        self.context.add_query(qry)
        return self

    def get_comments(self):
        """Queues a request for the comments attached to this list item.
        :rtype: CommentCollection
        """
        comments = CommentCollection(self.context)
        qry = ServiceOperationQuery(self, "GetComments", [], None, None, comments)
        self.context.add_query(qry)
        return comments

    def parse_and_set_field_value(self, field_name, value):
        """Sets the value of the field (2) for the list item based on an implementation-specific transformation
           of the value..

        :param str field_name: Specifies the field internal name.
        :param str value: Specifies the new value for the field (2).
        """
        payload = {
            "fieldName": field_name,
            "value": value
        }
        qry = ServiceOperationQuery(self, "ParseAndSetFieldValue", None, payload)
        self.context.add_query(qry)
        return self

    @property
    def display_name(self):
        """Specifies the display name of the list item.

        :rtype: str or None
        """
        return self.properties.get("DisplayName", None)

    @property
    def parent_list(self):
        """Get parent List"""
        from office365.sharepoint.lists.list import List
        return self.properties.get("ParentList", List(self.context, ResourcePath("ParentList", self.resource_path)))

    @property
    def file(self):
        """Get file"""
        from office365.sharepoint.files.file import File
        return self.properties.get("File", File(self.context, ResourcePath("File", self.resource_path)))

    @property
    def folder(self):
        """Get folder"""
        from office365.sharepoint.folders.folder import Folder
        return self.properties.get("Folder", Folder(self.context, ResourcePath("Folder", self.resource_path)))

    @property
    def attachment_files(self):
        """Specifies the collection of attachments that are associated with the list item.<62>"""
        from office365.sharepoint.attachments.attachmentfile_collection import AttachmentFileCollection
        return self.properties.get("AttachmentFiles",
                                   AttachmentFileCollection(self.context,
                                                            ResourcePath("AttachmentFiles", self.resource_path)))

    @property
    def content_type(self):
        """Gets a value that specifies the content type of the list item."""
        from office365.sharepoint.contenttypes.content_type import ContentType
        return self.properties.get("ContentType", ContentType(self.context,
                                                              ResourcePath("ContentType", self.resource_path)))

    @property
    def effective_base_permissions(self):
        """Gets a value that specifies the effective permissions on the list item that are assigned
           to the current user."""
        from office365.sharepoint.permissions.base_permissions import BasePermissions
        return self.properties.get("EffectiveBasePermissions", BasePermissions())

    @property
    def field_values(self):
        """Gets a collection of key/value pairs containing the names and values for the fields of the list item."""
        return self.properties.get("FieldValues", None)

    @property
    def comments_disabled(self):
        """Indicates whether commenting is disabled for this item.

        :rtype: bool or None
        """
        return self.properties.get("CommentsDisabled", None)

    @property
    def file_system_object_type(self):
        """
        Gets a value that specifies whether the list item is a file or a list folder.

        :rtype: str or None
        """
        return self.properties.get("FileSystemObjectType", None)

    @property
    def id(self):
        """
        Gets a value that specifies the list item identifier.

        :rtype: int
        """
        return self.properties.get("Id", None)

    @property
    def liked_by_information(self):
        """
        Gets the liked-by information for this list item.

        :rtype: LikedByInformation
        """
        return self.properties.get("LikedByInformation",
                                   LikedByInformation(self.context,
                                                      ResourcePath("likedByInformation", self.resource_path)))

    @property
    def versions(self):
        """Gets the collection of item version objects that represent the versions of the item."""
        return self.properties.get('Versions',
                                   BaseEntityCollection(self.context, ListItemVersion,
                                                        ResourcePath("versions", self.resource_path)))

    def get_property(self, name, default_value=None):
        """Return a property value, resolving known navigation properties and
        lookup fields.

        :param str name: property name.
        :param default_value: fallback used when the property is not loaded.
        """
        if default_value is None:
            property_mapping = {
                "AttachmentFiles": self.attachment_files,
                "ContentType": self.content_type,
                "EffectiveBasePermissions": self.effective_base_permissions,
                "LikedByInformation": self.liked_by_information,
                "ParentList": self.parent_list,
            }
            default_value = property_mapping.get(name, None)
        value = super(ListItem, self).get_property(name, default_value)
        # Lookup fields are stored under "<name>Id"; name[:-2] strips a
        # trailing "Id" to probe for the underlying lookup value.
        # NOTE(review): the probe runs for every name, not only names ending
        # in "Id" -- confirm that this is intended for arbitrary properties.
        if self.is_property_available(name[:-2]):
            lookup_value = super(ListItem, self).get_property(name[:-2], default_value)
            if isinstance(lookup_value, FieldMultiLookupValue):
                return ClientValueCollection(int, [v.LookupId for v in lookup_value])
            elif isinstance(lookup_value, FieldLookupValue):
                return lookup_value.LookupId
        return value

    def set_property(self, name, value, persist_changes=True):
        """Set a property, translating lookup/taxonomy values into the shape
        SharePoint expects on the wire.

        :param str name: field internal name.
        :param value: field value.
        :param bool persist_changes: whether the change is sent on update().
        """
        if persist_changes:
            if isinstance(value, TaxonomyFieldValueCollection):
                # Taxonomy fields are written through their hidden text field.
                self._set_taxonomy_field_value(name, value)
            elif isinstance(value, FieldMultiLookupValue):
                # Persist the lookup ids under "<name>Id"; keep the original
                # value locally only.
                collection = ClientValueCollection(int, [v.LookupId for v in value])
                super(ListItem, self).set_property("{name}Id".format(name=name), collection)
                super(ListItem, self).set_property(name, value, False)
            elif isinstance(value, FieldLookupValue):
                super(ListItem, self).set_property("{name}Id".format(name=name), value.LookupId)
                super(ListItem, self).set_property(name, value, False)
            else:
                super(ListItem, self).set_property(name, value, persist_changes)
        else:
            super(ListItem, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if self._resource_path is None:
            if name == "Id" and self._parent_collection is not None:
                self._resource_path = ServiceOperationPath(
                    "getItemById", [value], self._parent_collection.resource_path.parent)
        return self

    def _set_taxonomy_field_value(self, name, value):
        """Write a taxonomy value through the field's hidden text field.

        :param str name: internal name or title of the taxonomy field.
        :param value: taxonomy value collection to serialize.
        """
        tax_field = self.parent_list.fields.get_by_internal_name_or_title(name)

        def _tax_field_loaded():
            # The taxonomy field exposes the id of its backing text field.
            tax_text_field = self.parent_list.fields.get_by_id(tax_field.properties["TextField"])

            def _tax_text_field_loaded():
                self.set_property(tax_text_field.properties["StaticName"], str(value))
            tax_text_field.ensure_property("StaticName", _tax_text_field_loaded)
        tax_field.ensure_property("TextField", _tax_field_loaded)

    def ensure_type_name(self, target_list):
        """
        Determine metadata annotation for ListItem entity

        :param office365.sharepoint.lists.list.List target_list: List resource
        """
        def _init_item_type():
            self._entity_type_name = target_list.properties['ListItemEntityTypeFullName']
        if not self._entity_type_name:
            target_list.ensure_property("ListItemEntityTypeFullName", _init_item_type)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import numpy as np
from pyspark import RDD, since
from pyspark.streaming.dstream import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py, inherit_doc
from pyspark.mllib.linalg import SparseVector, _convert_to_vector
from pyspark.mllib.util import Saveable, Loader
__all__ = ['LabeledPoint', 'LinearModel',
'LinearRegressionModel', 'LinearRegressionWithSGD',
'RidgeRegressionModel', 'RidgeRegressionWithSGD',
'LassoModel', 'LassoWithSGD', 'IsotonicRegressionModel',
'IsotonicRegression', 'StreamingLinearAlgorithm',
'StreamingLinearRegressionWithSGD']
class LabeledPoint(object):
    """
    A data point: a numeric label together with its feature vector.

    :param label:
      Label for this data point.
    :param features:
      Vector of features for this point (NumPy array, list,
      pyspark.mllib.linalg.SparseVector, or scipy.sparse column matrix).

    .. note:: 'label' and 'features' are accessible as class attributes.

    .. versionadded:: 1.0.0
    """

    def __init__(self, label, features):
        # Labels are always stored as floats; features are normalized into
        # an MLlib vector type.
        self.label = float(label)
        self.features = _convert_to_vector(features)

    def __reduce__(self):
        # Pickle support: rebuild from the (label, features) pair.
        return (LabeledPoint, (self.label, self.features))

    def __str__(self):
        return "(%s,%s)" % (self.label, self.features)

    def __repr__(self):
        return "LabeledPoint(%s, %s)" % (self.label, self.features)
class LinearModel(object):
    """
    Base class for models of the form ``y = weights . x + intercept``.

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.

    .. versionadded:: 0.9.0
    """

    def __init__(self, weights, intercept):
        # _coeff/_intercept are read directly by subclasses (e.g. save()),
        # so their names are part of the internal contract.
        self._coeff = _convert_to_vector(weights)
        self._intercept = float(intercept)

    @property
    @since("1.0.0")
    def weights(self):
        """Weights computed for every feature."""
        return self._coeff

    @property
    @since("1.0.0")
    def intercept(self):
        """Intercept computed for this model."""
        return self._intercept

    def __repr__(self):
        return "(weights=%s, intercept=%r)" % (self._coeff, self._intercept)
@inherit_doc
class LinearRegressionModelBase(LinearModel):
    """A linear regression model.

    >>> lrmb = LinearRegressionModelBase(np.array([1.0, 2.0]), 0.1)
    >>> abs(lrmb.predict(np.array([-1.03, 7.777])) - 14.624) < 1e-6
    True
    >>> abs(lrmb.predict(SparseVector(2, {0: -1.03, 1: 7.777})) - 14.624) < 1e-6
    True

    .. versionadded:: 0.9.0
    """

    @since("0.9.0")
    def predict(self, x):
        """
        Predict the value of the dependent variable given a vector or
        an RDD of vectors containing values for the independent variables.
        """
        # For an RDD of feature vectors, distribute the prediction.
        if isinstance(x, RDD):
            return x.map(self.predict)
        features = _convert_to_vector(x)
        return self.weights.dot(features) + self.intercept
@inherit_doc
class LinearRegressionModel(LinearRegressionModelBase):
    """A linear regression model derived from a least-squares fit.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LinearRegressionModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...    miniBatchFraction=1.0, initialWeights=np.array([1.0]), regParam=0.1, regType="l2",
    ...    intercept=True, validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """

    @since("1.4.0")
    def save(self, sc, path):
        """Save a LinearRegressionModel.

        :param sc: SparkContext, used to reach the JVM gateway.
        :param path: directory the model is written to.
        """
        # Mirror the Python model into its Scala counterpart and delegate
        # persistence to the JVM save() implementation.
        java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a LinearRegressionModel.

        :param sc: SparkContext, used to reach the JVM gateway.
        :param path: directory the model was saved to.
        """
        java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel.load(
            sc._jsc.sc(), path)
        # Pull the fitted parameters back from the JVM and rebuild the
        # Python-side model object.
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        model = LinearRegressionModel(weights, intercept)
        return model
# train_func should take two parameters, namely data and initial_weights, and
# return the result of a call to the appropriate JVM stub.
# _regression_train_wrapper is responsible for setup and error checking.
def _regression_train_wrapper(train_func, modelClass, data, initial_weights):
    from pyspark.mllib.classification import LogisticRegressionModel
    # Training only accepts RDDs of LabeledPoint; check a sample record.
    sample = data.first()
    if not isinstance(sample, LabeledPoint):
        raise TypeError("data should be an RDD of LabeledPoint, but got %s" % type(sample))
    if initial_weights is None:
        # Default to a zero vector matching the feature dimensionality.
        initial_weights = [0.0] * len(data.first().features)
    weights_vec = _convert_to_vector(initial_weights)
    if modelClass == LogisticRegressionModel:
        # Logistic regression additionally reports feature/class counts.
        weights, intercept, numFeatures, numClasses = train_func(data, weights_vec)
        return modelClass(weights, intercept, numFeatures, numClasses)
    weights, intercept = train_func(data, weights_vec)
    return modelClass(weights, intercept)
class LinearRegressionWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.0, regType=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a linear regression model using Stochastic Gradient
        Descent (SGD). This solves the least squares regression
        formulation

            f(weights) = 1/(2n) ||A weights - y||^2

        which is the mean squared error. Here the data matrix has n rows,
        and the input RDD holds the set of rows of A, each with its
        corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

          - "l1" for using L1 regularization
          - "l2" for using L2 regularization
          - None for no regularization (default)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)

        def _jvm_train(rdd, initial):
            # Delegate to the Scala trainer; `initial` is the converted
            # initial weight vector supplied by _regression_train_wrapper.
            return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
                                 float(step), float(miniBatchFraction), initial,
                                 float(regParam), regType, bool(intercept),
                                 bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_jvm_train, LinearRegressionModel, data,
                                         initialWeights)
@inherit_doc
class LassoModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_1 penalty term.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = LassoWithSGD.train(
    ...     sc.parallelize(data), iterations=10, initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LassoModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = LassoWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=np.array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """
    @since("1.4.0")
    def save(self, sc, path):
        """Save a LassoModel."""
        # Wrap the Python-side coefficients in a JVM LassoModel and let the
        # Scala implementation handle serialization.
        jmodel = sc._jvm.org.apache.spark.mllib.regression.LassoModel(
            _py2java(sc, self._coeff), self.intercept)
        jmodel.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a LassoModel."""
        jmodel = sc._jvm.org.apache.spark.mllib.regression.LassoModel.load(
            sc._jsc.sc(), path)
        return LassoModel(_java2py(sc, jmodel.weights()), jmodel.intercept())
class LassoWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 1.0.
        Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a regression model with L1-regularization using Stochastic
        Gradient Descent. This solves the l1-regularized least squares
        regression formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam ||weights||_1

        Here the data matrix has n rows, and the input RDD holds the set
        of rows of A, each with its corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 1.0. "
            "Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.",
            DeprecationWarning)

        def _jvm_train(rdd, initial):
            # Delegate to the Scala trainer with the converted initial weights.
            return callMLlibFunc("trainLassoModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), initial,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_jvm_train, LassoModel, data, initialWeights)
@inherit_doc
class RidgeRegressionModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_2 penalty term.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = RidgeRegressionModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=np.array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """
    @since("1.4.0")
    def save(self, sc, path):
        """Save a RidgeRegressionModel."""
        # Wrap the Python-side coefficients in a JVM RidgeRegressionModel and
        # let the Scala implementation handle serialization.
        jmodel = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel(
            _py2java(sc, self._coeff), self.intercept)
        jmodel.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a RidgeRegressionModel."""
        jmodel = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel.load(
            sc._jsc.sc(), path)
        return RidgeRegressionModel(_java2py(sc, jmodel.weights()),
                                    jmodel.intercept())
class RidgeRegressionWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 0.0.
        Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for
        LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a regression model with L2-regularization using Stochastic
        Gradient Descent. This solves the l2-regularized least squares
        regression formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam/2 ||weights||^2

        Here the data matrix has n rows, and the input RDD holds the set
        of rows of A, each with its corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 0.0. "
            "Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for "
            "LinearRegression.", DeprecationWarning)

        def _jvm_train(rdd, initial):
            # Delegate to the Scala trainer with the converted initial weights.
            return callMLlibFunc("trainRidgeModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), initial,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_jvm_train, RidgeRegressionModel, data, initialWeights)
class IsotonicRegressionModel(Saveable, Loader):

    """
    Regression model for isotonic regression.

    :param boundaries:
      Array of boundaries for which predictions are known. Boundaries
      must be sorted in increasing order.
    :param predictions:
      Array of predictions associated to the boundaries at the same
      index. Results of isotonic regression and therefore monotone.
    :param isotonic:
      Indicates whether this is isotonic or antitonic.

    >>> data = [(1, 0, 1), (2, 1, 1), (3, 2, 1), (1, 3, 1), (6, 4, 1), (17, 5, 1), (16, 6, 1)]
    >>> irm = IsotonicRegression.train(sc.parallelize(data))
    >>> irm.predict(3)
    2.0
    >>> irm.predict(5)
    16.5
    >>> irm.predict(sc.parallelize([3, 5])).collect()
    [2.0, 16.5]
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> irm.save(sc, path)
    >>> sameModel = IsotonicRegressionModel.load(sc, path)
    >>> sameModel.predict(3)
    2.0
    >>> sameModel.predict(5)
    16.5
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.4.0
    """

    def __init__(self, boundaries, predictions, isotonic):
        # Sorted boundaries plus their fitted values define a piecewise
        # linear function; `isotonic` records the fitted direction.
        self.boundaries = boundaries
        self.predictions = predictions
        self.isotonic = isotonic

    @since("1.4.0")
    def predict(self, x):
        """
        Predict labels for provided features.
        Using a piecewise linear function.
        1) If x exactly matches a boundary then associated prediction
        is returned. In case there are multiple predictions with the
        same boundary then one of them is returned. Which one is
        undefined (same as java.util.Arrays.binarySearch).
        2) If x is lower or higher than all boundaries then first or
        last prediction is returned respectively. In case there are
        multiple predictions with the same boundary then the lowest
        or highest is returned respectively.
        3) If x falls between two values in boundary array then
        prediction is treated as piecewise linear function and
        interpolated value is returned. In case there are multiple
        values with the same boundary then the same rules as in 2)
        are used.

        :param x:
          Feature or RDD of Features to be labeled.
        """
        if isinstance(x, RDD):
            # Predict each element of the RDD independently.
            return x.map(lambda v: self.predict(v))
        # np.interp implements exactly the boundary/interpolation rules above.
        return np.interp(x, self.boundaries, self.predictions)

    @since("1.4.0")
    def save(self, sc, path):
        """Save an IsotonicRegressionModel."""
        jboundaries = _py2java(sc, self.boundaries.tolist())
        jpredictions = _py2java(sc, self.predictions.tolist())
        jmodel = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
            jboundaries, jpredictions, self.isotonic)
        jmodel.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load an IsotonicRegressionModel."""
        jmodel = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
            sc._jsc.sc(), path)
        boundaries = _java2py(sc, jmodel.boundaryVector()).toArray()
        predictions = _java2py(sc, jmodel.predictionVector()).toArray()
        return IsotonicRegressionModel(boundaries, predictions, jmodel.isotonic)
class IsotonicRegression(object):
    """
    Isotonic regression.
    Currently implemented using parallelized pool adjacent violators
    algorithm. Only univariate (single feature) algorithm supported.

    Sequential PAV implementation based on:

      Tibshirani, Ryan J., Holger Hoefling, and Robert Tibshirani.
      "Nearly-isotonic regression." Technometrics 53.1 (2011): 54-61.
      Available from http://www.stat.cmu.edu/~ryantibs/papers/neariso.pdf

    Sequential PAV parallelization based on:

      Kearsley, Anthony J., Richard A. Tapia, and Michael W. Trosset.
      "An approach to parallelizing isotonic regression."
      Applied Mathematics and Parallel Computing. Physica-Verlag HD, 1996. 141-147.
      Available from http://softlib.rice.edu/pub/CRPC-TRs/reports/CRPC-TR96640.pdf

    See `Isotonic regression (Wikipedia) <http://en.wikipedia.org/wiki/Isotonic_regression>`_.

    .. versionadded:: 1.4.0
    """

    @classmethod
    @since("1.4.0")
    def train(cls, data, isotonic=True):
        """
        Train an isotonic regression model on the given data.

        :param data:
          RDD of (label, feature, weight) tuples.
        :param isotonic:
          Whether this is isotonic (which is default) or antitonic.
          (default: True)
        """
        # The JVM stub returns the fitted boundary and prediction vectors.
        boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
                                                data.map(_convert_to_vector),
                                                bool(isotonic))
        return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(),
                                       isotonic)
class StreamingLinearAlgorithm(object):
    """
    Base class that has to be inherited by any StreamingLinearAlgorithm.

    Prevents reimplementation of methods predictOn and predictOnValues.

    .. versionadded:: 1.5.0
    """
    def __init__(self, model):
        # The current model; subclasses replace it as new batches arrive.
        self._model = model

    @since("1.5.0")
    def latestModel(self):
        """
        Returns the latest model.
        """
        return self._model

    def _validate(self, dstream):
        """Raise if `dstream` is not a DStream or the model has no weights."""
        if not isinstance(dstream, DStream):
            raise TypeError(
                "dstream should be a DStream object, got %s" % type(dstream))
        if not self._model:
            # Fixed typo in the user-facing message ("intialized" -> "initialized").
            raise ValueError(
                "Model must be initialized using setInitialWeights")

    @since("1.5.0")
    def predictOn(self, dstream):
        """
        Use the model to make predictions on batches of data from a
        DStream.

        :return:
          DStream containing predictions.
        """
        self._validate(dstream)
        return dstream.map(lambda x: self._model.predict(x))

    @since("1.5.0")
    def predictOnValues(self, dstream):
        """
        Use the model to make predictions on the values of a DStream and
        carry over its keys.

        :return:
          DStream containing the input keys and the predictions as values.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda x: self._model.predict(x))
@inherit_doc
class StreamingLinearRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a linear regression model on streaming data.
    Training uses Stochastic Gradient Descent to update the model
    based on each new batch of incoming data from a DStream
    (see `LinearRegressionWithSGD` for model equation).

    Each batch of data is assumed to be an RDD of LabeledPoints.
    The number of data points per batch can vary, but the number
    of features must be constant. An initial weight vector must
    be provided.

    :param stepSize:
      Step size for each iteration of gradient descent.
      (default: 0.1)
    :param numIterations:
      Number of iterations run for each batch of data.
      (default: 50)
    :param miniBatchFraction:
      Fraction of each batch of data to use for updates.
      (default: 1.0)
    :param convergenceTol:
      Value used to determine when to terminate iterations.
      (default: 0.001)

    .. versionadded:: 1.5.0
    """
    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, convergenceTol=0.001):
        # Store the SGD hyper-parameters; the model itself starts empty and
        # must be seeded via setInitialWeights() before training/prediction.
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        self._model = None
        super(StreamingLinearRegressionWithSGD, self).__init__(
            model=self._model)

    @since("1.5.0")
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.

        This must be set before running trainOn and predictOn
        """
        # Start from intercept 0; training updates both weights and intercept.
        self._model = LinearRegressionModel(_convert_to_vector(initialWeights), 0)
        return self

    @since("1.5.0")
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def _update(rdd):
            # LinearRegressionWithSGD.train raises an error for an empty RDD.
            if not rdd.isEmpty():
                self._model = LinearRegressionWithSGD.train(
                    rdd, self.numIterations, self.stepSize,
                    self.miniBatchFraction, self._model.weights,
                    intercept=self._model.intercept, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(_update)
def _test():
    """Run this module's doctests against a local two-core SparkContext."""
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.regression
    test_globals = pyspark.mllib.regression.__dict__.copy()
    spark = (SparkSession.builder
             .master("local[2]")
             .appName("mllib.regression tests")
             .getOrCreate())
    test_globals['sc'] = spark.sparkContext
    results = doctest.testmod(globs=test_globals, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if results.failed:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| |
import datetime
import re
from firebase import firebase
import ENV_VAR as ENV
import timezone_handler as tz
# fb = firebase.FirebaseApplication("https://burning-heat-7654.firebaseio.com/", None)
# Firebase client shared by every logger below; the target database URL comes
# from the ENV_VAR module so it can differ between deployments.
fb = firebase.FirebaseApplication(ENV.FIREBASE_LINK, None)
# _YES = ["yes", "Yes", "YES", "Y", "y"]
# _NO = ["no", "No", "NO", "N", "n"]
# Cut-off times compared as plain integers in HHMMSS (24-hour) form.
TIME_LINE = 190000  # 24 hour standard ==> 19:00:00
RECORD_DAY_CHANGE_TIME_LINE = 180000  # 1 hour before TIME_LINE ==> 18:00:00
def get_brisbane_time():
    """Return the current Brisbane wall clock as a time-of-day object."""
    brisbane_now = tz.get_brisbane_time()
    return brisbane_now.time()
def get_date():
    """Return today's date in the Brisbane timezone.
    :return <class 'datetime.date'>, e.g. 2016-06-03
    """
    brisbane_now = tz.get_brisbane_time()
    return brisbane_now.date()
def get_start_date(query):
    """
    get the evaluation start-date of this contact
    :param query: string a path contains startDate key /logging/response/<contact_name>
    :return <class 'datetime.date'>, 2016-06-03
    """
    # Unknown contacts start their evaluation today.
    if is_new_contact(query):
        return get_date()
    stored = fb.get(query, None)["startDate"]
    return datetime.datetime.strptime(stored, "%Y%m%d").date()
def str_date(date):
    """
    Convert a date into its compact YYYYMMDD string form.
    :param date: <class 'datetime.date'> (e.g. 2016-06-10)
    :return "20160610"
    """
    # str(date) is the ISO form "YYYY-MM-DD"; stripping the dashes yields
    # YYYYMMDD directly, replacing the original split-and-concatenate loop.
    return str(date).replace('-', '')
def time_is_over_day_segment():
    """Return True when the current Brisbane time is past the record cut-off.

    get_brisbane_time() renders like 16:21:26.009927; dropping the fractional
    part and the colons gives an HHMMSS integer (162126) for comparison.
    """
    hhmmss = str(get_brisbane_time()).split('.')[0].replace(':', '')
    # Responses before RECORD_DAY_CHANGE_TIME_LINE (18:00:00) are counted
    # toward the previous day's prompt, so the caller must shift the delta.
    return int(hhmmss) >= RECORD_DAY_CHANGE_TIME_LINE
def generate_query_by_time(query):
    """Append today's "dailyMessage/week<W>/day<D>" segment to the query path."""
    delta = get_days_delta(query)
    # NOTE(review): get_days_delta can return a (None, err) tuple on failure,
    # which would make generate_time_path_by_delta raise — confirm callers.
    return query + "/dailyMessage/" + generate_time_path_by_delta(delta)
def get_days_delta(query):
    """
    Compute how many whole days have passed since the contact's start date.
    :param query: string; the contact's base path, "/logging/response/<contact>"
    :return int day count, or a (None, err) tuple when the lookup fails
    """
    # A date difference renders like "18 days, 0:00:00"; the first token is
    # the day count — except day zero, which renders as just "0:00:00".
    try:
        delta_string = str(get_date() - get_start_date(query)).split(' ')[0]
        return int(delta_string)
    except TypeError as err:
        # The contact's evaluation has not been initialized yet.
        return None, err
    except ValueError as err:
        # Same-day subtraction yields "0:00:00", which int() rejects.
        if delta_string == "0:00:00":
            return 0
        return None, err
def generate_time_path_by_delta(delta):
    """Map a day delta onto the "week<W>/day<D>" path segment."""
    if delta <= 0:
        return "week1/day1"
    # Records logged before the evening cut-off belong to the previous day.
    if not time_is_over_day_segment():
        delta -= 1
    return "week%d/day%d" % (delta // 7 + 1, delta % 7 + 1)
def get_week_number(time_path):
    """Extract the numeric week from a "...week<W>/day<D>" path."""
    after_week = time_path.split("week")[1]
    return int(after_week.split("/")[0])
def is_new_contact(query):
    """
    :param query: /logging/response/<contact>
    :return True when no data exists yet for this contact, else False.
    """
    return not fb.get(query, None)
def create_contact_info(query, user, number):
    """
    create the basic info of contact under /logging/response
    :param query: /logging/response/<contact>
    :param user: string
    :param number: string "61478417108"
    """
    payload = {
        'startDate': str_date(get_date()),
        'number': number,
        'user': user
    }
    fb.patch(query, payload)
def text_parse(text):
    """
    Parse a response text into its count and yes/no meaning.
    :param text: raw response string
    :return tuple <times(int), is_connected(boolean)>

    Responses follow the shape "Y<whatever>3" for yes-with-count and "N"
    for no, so any digit run in the text carries the count and a positive
    count implies a "yes"; the word "once" is an accepted alias for 1.
    """
    cleaned = clean_text(text)
    match = re.search(r"\d+", cleaned)
    if match:
        times = int(match.group())
        return times, times > 0
    if "once" in cleaned:
        return 1, True
    return 0, False
def clean_text(text):
    """
    Normalize free-form response text for parsing.
    Replaces every run of non-word characters with a single space, trims
    the ends and lowercases the result.
    :param text: any string
    :return the cleaned, lowercase string
    """
    # Use a raw string for the pattern: the original "\W+" relied on Python
    # passing unknown escapes through and triggers a DeprecationWarning /
    # SyntaxWarning on modern interpreters.
    return re.sub(r"\W+", " ", text).strip().lower()
def daily_message_logger(contact, text):
    """Record one parsed daily response under the contact's week/day node."""
    base = "/logging/response/" + contact
    query = generate_query_by_time(base)
    # NOTE(review): query[0] is the first character of the path string ("/"),
    # which is always truthy; this guard probably meant to detect the
    # (None, err) tuple that get_days_delta can produce — confirm intent.
    if not query[0]:
        raise Exception("query module report an error")
    times, is_connected = text_parse(text)
    response_time = str(tz.get_brisbane_time())
    fb.patch(query, {
        'date': str_date(get_date()),
        'isResponsed': True,
        'responseText': text,
        'isConnected': is_connected,
        'times': times,
        'responseTime': response_time,
    })
def daily_logging(user, contact, number, text):
    """
    Create the daily message logging record, initializing the contact's
    basic info on first contact.
    :param user: string
    :param contact: string
    :param number: string "61478417108"
    :return True on a successful update, False otherwise
    """
    try:
        base_query = "/logging/response/" + contact
        if is_new_contact(base_query):
            create_contact_info(base_query, user, number)
        daily_message_logger(contact, text)
        return True
    except Exception:
        # Best-effort logging: any failure is reported to the caller as False.
        return False
def status_logging():
    """Placeholder: status logging is not implemented yet."""
    pass
# may need a new query builder. the path could be /logging/na
#################################################################
def toy_message_logging(username, contact_name):
    """Log one toy interaction by *username* aimed at *contact_name*."""
    if not is_a_valid_user(username):
        # TODO: a error reporter
        # NOTE(review): execution continues after this warning, so an
        # unknown user still gets a logging entry — confirm that is intended.
        print("can't find user: " + username)
    query = "/logging/toy/" + username + "/"
    if is_new_toy_logging(query):
        create_toy_logging(query)
    query = generate_query_by_time(query)
    # First entry of the day creates the node; later entries bump the counter.
    total = get_total_times(query)
    if total is None:
        fb.patch(query, {"date": str_date(get_date()), "total": 1})
    else:
        fb.patch(query, {"total": total + 1})
    record = {"loggingTime": str(tz.get_brisbane_time()),
              "target": contact_name}
    fb.post(query + "/originRecord", record)
    return True
def is_a_valid_user(username):
    """Return True when a /user/<username> record exists in Firebase."""
    return bool(fb.get("/user/" + username, None))
def is_new_toy_logging(query):
    """Return True when no toy-logging data exists at *query* yet."""
    return not fb.get(query, None)
def create_toy_logging(query):
    """Initialize a toy-logging node with today's date as its start date."""
    fb.patch(query, {'startDate': str_date(get_date())})
def get_total_times(query):
    """Fetch the stored daily total under *query*, or None when absent."""
    total_path = query + "/total"
    return fb.get(total_path, None)
################################################################
# page and device status logger
def page_reload_logger(username, datetime):
    """Record a page-reload event for *username* and refresh its total.

    NOTE(review): the `datetime` parameter shadows the datetime module
    inside this function.
    """
    query = "/logging/toy/" + username
    if is_new_toy_logging(query):
        create_toy_logging(query)
    record_path = (query + "/device/"
                   + generate_time_path_by_delta(get_days_delta(query))
                   + "/pageReload/originRecord")
    fb.post(record_path, {"datetime": datetime})
    account_reload_times(record_path)
def account_reload_times(query_record):
    """Recount originRecord entries and store the total beside them."""
    totals_path = query_record.split("/originRecord")[0]
    try:
        times = len(fb.get(query_record, None))
    except TypeError:
        # fb.get returned None (no records yet): count this one as the first.
        times = 1
    fb.patch(totals_path, {"total": times})
def page_visibility_status(username, visibility_status, datetime):
    """Record a page-visibility change event for *username*."""
    query = "/logging/toy/" + username
    if is_new_toy_logging(query):
        create_toy_logging(query)
    status_path = (query + "/device/"
                   + generate_time_path_by_delta(get_days_delta(query))
                   + "/pageStatus")
    fb.post(status_path, {"visibilityStatus": visibility_status,
                          "datetime": datetime})
| |
import mdp
# import numeric module (scipy, Numeric or numarray)
numx, numx_rand, numx_linalg = mdp.numx, mdp.numx_rand, mdp.numx_linalg
numx_description = mdp.numx_description
import random
import itertools
def timediff(data):
    """Returns the array of the time differences of data."""
    # Vectorized successive difference; measured to be the fastest variant
    # found so far.
    shifted, base = data[1:], data[:-1]
    return shifted - base
def refcast(array, dtype):
    """
    Cast the array to dtype only if necessary, otherwise return a reference.
    """
    wanted = mdp.numx.dtype(dtype)
    if array.dtype == wanted:
        # Already the right dtype: avoid the copy astype() would make.
        return array
    return array.astype(wanted)
def scast(scalar, dtype):
    """Convert a scalar in a 0D array of the given dtype."""
    # Calling array() on a scalar yields a rank-0 array carrying the dtype.
    return numx.array(scalar, dtype=dtype)
def rotate(mat, angle, columns=(0, 1), units='radians'):
    """
    Rotate in-place data matrix (NxM) in the plane defined by the columns=[i,j]
    when observation are stored on rows. Observations are rotated
    counterclockwise. This corresponds to the following matrix-multiplication
    for each data-point (unchanged elements omitted):

     [  cos(angle) -sin(angle)     [ x_i ]
        sin(angle)  cos(angle) ] * [ x_j ]

    If M=2, columns=[0,1].

    :param units: 'radians' (default) or 'degrees'.
    """
    # Bug fix: the original tested "units is 'degrees'", an identity
    # comparison that only works when both string literals happen to be
    # interned by the interpreter; use equality instead.
    if units == 'degrees':
        angle = angle/180.*numx.pi
    cos_ = numx.cos(angle)
    sin_ = numx.sin(angle)
    [i, j] = columns
    col_i = mat[:, i] + 0.  # "+ 0." forces a copy; mat[:, i] is overwritten next
    col_j = mat[:, j]
    mat[:, i] = cos_*col_i - sin_*col_j
    mat[:, j] = sin_*col_i + cos_*col_j
def permute(x, indices=(0, 0), rows=0, cols=1):
    """Swap two columns and (or) two rows of 'x', whose indices are specified
    in indices=[i,j].
    Note: permutations are done in-place. You'll lose your original matrix"""
    # Array slices are views, so a naive tuple swap of two views would
    # corrupt the data; adding 0 to one operand forces a single copy, which
    # is cheaper than copying both sides via tolist() or an explicit copy().
    i, j = indices
    if rows:
        x[i, :], x[j, :] = x[j, :], x[i, :] + 0
    if cols:
        x[:, i], x[:, j] = x[:, j], x[:, i] + 0
def hermitian(x):
    """Compute the Hermitian, i.e. conjugate transpose, of x."""
    return x.conj().T
def symrand(dim_or_eigv, dtype="d"):
    """Return a random symmetric (Hermitian) matrix.

    If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
    uniformly distributed on (-1,1).

    If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
    eigenvalues are 'a'.
    """
    if isinstance(dim_or_eigv, int):
        dim = dim_or_eigv
        eigs = (numx_rand.random(dim)*2) - 1
    elif (isinstance(dim_or_eigv, numx.ndarray)
          and len(dim_or_eigv.shape) == 1):
        dim = dim_or_eigv.shape[0]
        eigs = dim_or_eigv
    else:
        raise mdp.MDPException("input type not supported.")
    # Conjugate the diagonal eigenvalue matrix by a random rotation.
    rot = random_rot(dim)
    sym = mdp.utils.mult(mult_diag(eigs, hermitian(rot), left=False), rot)
    # Symmetrize again to kill roundoff asymmetry.
    sym = 0.5*(sym.T + sym)
    if dtype in ('D', 'F', 'G'):
        # Complex dtypes: add a random antisymmetric imaginary part.
        other = symrand(dim_or_eigv)
        sym = sym + 1j*(numx.triu(other) - numx.tril(other))
    return refcast(sym, dtype)
def random_rot(dim, dtype='d'):
    """Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).

    :param dim: dimension of the returned (dim x dim) rotation matrix
    :param dtype: dtype of the returned matrix (default 'd')

    The algorithm is described in the paper
    Stewart, G.W., "The efficient generation of random orthogonal
    matrices with an application to condition estimators", SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
    H = mdp.numx.eye(dim, dtype=dtype)
    D = mdp.numx.ones((dim,), dtype=dtype)
    for n in range(1, dim):
        # Draw a random vector and record the sign of its first component.
        x = mdp.numx_rand.normal(size=(dim-n+1,)).astype(dtype)
        D[n-1] = mdp.numx.sign(x[0])
        x[0] -= D[n-1]*mdp.numx.sqrt((x*x).sum())
        # Householder transformation
        Hx = ( mdp.numx.eye(dim-n+1, dtype=dtype)
               - 2.*mdp.numx.outer(x, x)/(x*x).sum() )
        # Embed the (dim-n+1)-dim reflector in the lower-right corner and
        # accumulate it into H.
        mat = mdp.numx.eye(dim, dtype=dtype)
        mat[n-1:, n-1:] = Hx
        H = mdp.utils.mult(H, mat)
    # Fix the last sign such that the determinant is 1
    D[-1] = -D.prod()
    # Equivalent to mult(numx.diag(D), H) but faster
    H = (D*H.T).T
    return H
def norm2(v):
    """Compute the 2-norm for 1D arrays.
    norm2(v) = sqrt(sum(v_i^2))"""
    squared_sum = (v*v).sum()
    return numx.sqrt(squared_sum)
def cov2(x, y):
    """Compute the covariance between 2D matrices x and y.
    Complies with the old scipy.cov function: different variables
    are on different columns."""
    n_obs = x.shape[0]
    mean_x = x.mean(axis=0)
    mean_y = y.mean(axis=0)
    # unbiased cross-covariance: E[x^T y]/(n-1) minus the outer product of means
    return mdp.utils.mult(x.T, y)/(n_obs-1) - numx.outer(mean_x, mean_y)
def cov_maxima(cov):
    """Extract the maxima of a covariance matrix.

    Returns the absolute global maximum, then recurses on the matrix with
    that maximum's row and column removed, collecting one maximum per step.
    """
    dim = cov.shape[0]
    if dim < 1:
        return []
    acov = abs(cov)
    flat_idx = acov.argmax()
    row, col = flat_idx // dim, flat_idx % dim
    maxima = [acov[row, col]]
    # drop the row and column of the current maximum, then recurse
    reduced = acov[numx.arange(dim) != row, :]
    reduced = reduced[:, numx.arange(dim) != col]
    maxima.extend(cov_maxima(reduced))
    return maxima
def mult_diag(d, mtx, left=True):
    """Multiply a full matrix by a diagonal matrix.
    This function should always be faster than dot.

    Input:
      d -- 1D (N,) array (contains the diagonal elements)
      mtx -- 2D (N,N) array
    Output:
      mult_diag(d, mts, left=True) == dot(diag(d), mtx)
      mult_diag(d, mts, left=False) == dot(mtx, diag(d))
    """
    # left: scale rows (broadcast d over the transpose);
    # right: scale columns (plain broadcast over the last axis)
    return (d*mtx.T).T if left else d*mtx
def comb(N, k):
    """Return number of combinations of k objects from a set of N objects
    without repetitions, a.k.a. the binomial coefficient of N and k.

    For k > N the running product contains a zero factor, so the result
    is 0, matching the mathematical convention.
    """
    # 'xrange' does not exist on Python 3; 'range' is equivalent here and
    # lazy on Python 3 as well.
    ret = 1
    for mlt in range(N, N-k, -1):
        ret *= mlt
    for dv in range(1, k+1):
        ret //= dv
    return ret
# WARNING numpy.linalg.eigh does not support float sizes larger than 64 bits,
# and complex numbers of size larger than 128 bits.
# Also float16 is not supported either.
# This is not a problem for MDP, as long as scipy.linalg.eigh is available.
def get_dtypes(typecodes_key, _safe=True):
    """Return the list of dtypes corresponding to the set of
    typecodes defined in numpy.typecodes[typecodes_key].
    E.g., get_dtypes('Float') = [dtype('f'), dtype('d'), dtype('g')].
    If _safe is True (default), we remove large floating point types
    if the numerical backend does not support them.
    """
    have_scipy_eigh = mdp.config.has_symeig == 'scipy.linalg.eigh'
    dtypes = []
    for code in numx.typecodes[typecodes_key]:
        try:
            dt = numx.dtype(code)
        except TypeError:
            # this numpy build does not know the typecode: skip it
            continue
        # drop the dtypes numpy.linalg.eigh cannot handle unless the
        # scipy backend (which supports them) is available
        if _safe and not have_scipy_eigh and dt in _UNSAFE_DTYPES:
            continue
        dtypes.append(dt)
    return dtypes
# Dtypes not supported by numpy.linalg.eigh (see warning above), built only
# from the type names this numpy build actually provides.
# NOTE(review): numx.typeDict is deprecated in recent numpy releases
# (numpy.sctypeDict is the replacement) -- confirm the backend version.
_UNSAFE_DTYPES = [numx.typeDict[d] for d in
                  ['float16', 'float96', 'float128', 'complex192', 'complex256']
                  if d in numx.typeDict]
def nongeneral_svd(A, range=None, **kwargs):
    """SVD routine for simple eigenvalue problem, API is compatible with
    symeig."""
    # NOTE: the parameter is called 'range' for symeig API compatibility;
    # it shadows the builtin inside this function.
    Z2, w, Z = mdp.utils.svd(A)
    # sort eigenvalues ascending and reorder the eigenvectors to match
    order = w.argsort()
    w = w.take(order)
    Z = Z.take(order, axis=0).T
    if range is None:
        return w, Z
    # 'range' is a 1-based inclusive (lo, hi) interval, as in symeig
    lo, hi = range
    return w[lo-1:hi], Z[:, lo-1:hi]
def sqrtm(A):
    """This is a symmetric definite positive matrix sqrt function"""
    # A = V diag(d) V^T  =>  sqrt(A) = V diag(sqrt(d)) V^T
    eigvals, eigvecs = mdp.utils.symeig(A)
    return mdp.utils.mult(eigvecs, mult_diag(numx.sqrt(eigvals), eigvecs.T))
# replication functions
def lrep(x, n):
    """Replicate x n-times on a new first dimension"""
    # prepend a length-1 axis, then repeat along it
    return x.reshape((1,) + x.shape).repeat(n, axis=0)
def rrep(x, n):
    """Replicate x n-times on a new last dimension"""
    # append a length-1 axis, then repeat along it
    return x.reshape(x.shape + (1,)).repeat(n, axis=-1)
def irep(x, n, dim):
    """Replicate x n-times on a new dimension dim-th dimension"""
    # insert a length-1 axis at position 'dim', then repeat along it
    new_shape = x.shape[:dim] + (1,) + x.shape[dim:]
    return x.reshape(new_shape).repeat(n, axis=dim)
# /replication functions
try:
    # product exists only in itertools >= 2.6
    from itertools import product
except ImportError:
    def product(*args, **kwds):
        """Cartesian product of input iterables.
        """
        # taken from python docs 2.6
        # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
        # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
        # Build a real list (not a bare map object): on Python 3 'map'
        # returns an iterator, which cannot be multiplied by an int.
        pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1)
        result = [[]]
        for pool in pools:
            result = [x+[y] for x in result for y in pool]
        for prod in result:
            yield tuple(prod)
def orthogonal_permutations(a_dict):
    """Yield one dict per combination of the list values in `a_dict`.

    Each key of `a_dict` maps to a list of candidate values; every yielded
    dict maps each key to one of its candidates, covering the full
    Cartesian product.  Useful when a method with several arguments shall
    be tested over all combinations of argument values.  The iteration
    order of the combinations is unspecified, so the elements should be
    orthogonal to each other.
    """
    # one list of (key, value) pairs per key
    candidate_pairs = [[(key, value) for value in values]
                       for key, values in dict(a_dict).items()]
    for combination in product(*candidate_pairs):
        yield dict(combination)
def izip_stretched(*iterables):
    """Same as izip, except that for convenience non-iterables are repeated ad infinitum.

    This is useful when trying to zip input data with respective labels
    and allows for having a single label for all data, as well as for
    having a list of labels for each data vector.
    Note that this will take strings as an iterable (of course), so
    strings acting as a single value need to be wrapped in a repeat
    statement of their own.

    Thus,
    >>> for zipped in izip_stretched([1, 2, 3], -1):
            print(zipped)
    (1, -1)
    (2, -1)
    (3, -1)

    is equivalent to
    >>> for zipped in izip([1, 2, 3], [-1] * 3):
            print(zipped)
    (1, -1)
    (2, -1)
    (3, -1)
    """
    def iter_or_repeat(val):
        # non-iterables (iter() raises TypeError) are repeated forever
        try:
            return iter(val)
        except TypeError:
            return itertools.repeat(val)

    # materialize as a list: on Python 3 'map' returns a one-shot iterator,
    # which would break the repeated traversal below; also, 'it.next()'
    # is Python-2-only, so use the builtin next() instead.
    streams = [iter_or_repeat(val) for val in iterables]
    if not streams:
        return
    while True:
        try:
            yield tuple([next(it) for it in streams])
        except StopIteration:
            # shortest finite iterable exhausted: stop cleanly (PEP 479
            # forbids letting StopIteration escape a generator body)
            return
def weighted_choice(a_dict, normalize=True):
    """Returns a key from a dictionary based on the weight that the value suggests.

    If 'normalize' is False, it is assumed the weights sum up to unity.
    Otherwise, the algorithm will take care of normalising.

    Example:
    >>> d = {'a': 0.1, 'b': 0.5, 'c': 0.4}
    >>> weighted_choice(d)
    # draws 'b':'c':'a' with 5:4:1 probability

    TODO: It might be good to either shuffle the order or explicitely specify it,
    before walking through the items, to minimise possible degeneration.
    """
    if normalize:
        weights = a_dict.copy()
        total = sum(weights.values())
        for key in weights:
            weights[key] = weights[key] / total
    else:
        weights = a_dict
    # walk the cumulative distribution until it exceeds one uniform draw
    threshold = random.random()
    cumulative = 0
    for key, weight in weights.items():
        cumulative += weight
        if cumulative > threshold:
            return key
    # can only be reached through floating-point round-off
    return None
def bool_to_sign(an_array):
    """Return -1 for each False; +1 for each True"""
    # shifting by 0.5 maps False -> -0.5 and True -> +0.5 before sign()
    shifted = an_array - 0.5
    return numx.sign(shifted)
def sign_to_bool(an_array, zero=True):
    """Return False for each negative value, else True.
    The value for 0 is specified with 'zero'.
    """
    arr = numx.array(an_array)
    # 'zero' decides whether 0 counts as non-negative (>=) or positive (>)
    return arr >= 0 if zero else arr > 0
def gabor(size, alpha, phi, freq, sgm, x0=None, res=1, ampl=1.):
    """Return a 2D array containing a Gabor wavelet.

    Input arguments:
    size -- (height, width) (pixels)
    alpha -- orientation (rad)
    phi -- phase (rad)
    freq -- frequency (cycles/deg)
    sgm -- (sigma_x, sigma_y) standard deviation along the axis
           of the gaussian ellipse (pixel)
    x0 -- (x,y) coordinates of the center of the wavelet (pixel)
          Default: None, meaning the center of the array
    res -- spatial resolution (deg/pixel)
           Default: 1, so that 'freq' is measured in cycles/pixel
    ampl -- constant multiplying the result
            Default: 1.
    """
    # init
    # NOTE(review): the docstring says size is (height, width) but the
    # unpacking below reads it as (w, h) -- confirm the convention callers use
    w, h = size
    if x0 is None: x0 = (w//2, h//2)
    y0, x0 = x0
    # some useful quantities
    freq *= res   # convert cycles/deg into cycles/pixel
    sinalpha = numx.sin(alpha)
    cosalpha = numx.cos(alpha)
    # spatial frequency components of the carrier along the two axes
    v0, u0 = freq*cosalpha, freq*sinalpha
    # coordinates
    #x = numx.mgrid[-x0:w-x0, -y0:h-y0]
    # coordinate grids centered on (x0, y0); the transpose pair reproduces
    # the ordering of the commented-out mgrid version above
    x = numx.meshgrid(numx.arange(w)-x0, numx.arange(h)-y0)
    x = (x[0].T, x[1].T)
    # rotate the coordinate frame by alpha
    xr = x[0]*cosalpha - x[1]*sinalpha
    yr = x[0]*sinalpha + x[1]*cosalpha
    # gabor
    # gaussian envelope (in the rotated frame) times a cosine carrier
    im = ampl*numx.exp(-0.5*(xr*xr/(sgm[0]*sgm[0]) + yr*yr/(sgm[1]*sgm[1]))) \
        *numx.cos(-2.*numx.pi*(u0*x[0]+v0*x[1]) - phi)
    return im
def residuals(app_x, y_noisy, exp_funcs, x_orig, k=0.0):
    """Function used internally by invert_exp_funcs2 to approximate
    inverses in ConstantExpansionNode. """
    app_x = app_x.reshape((1, len(app_x)))
    expanded = numx.concatenate([func(app_x) for func in exp_funcs], axis=1)
    # each residual part is normalized by the sqrt of its dimensionality so
    # the two terms are comparable, then weighted by (1-k) and k
    scale_y = numx.sqrt(len(y_noisy))
    scale_x = numx.sqrt(len(x_orig))
    fit_term = (1-k)*(y_noisy-expanded[0]) / scale_y
    anchor_term = k * (x_orig - app_x[0]) / scale_x
    return numx.append(fit_term, anchor_term)
def invert_exp_funcs2(exp_x_noisy, dim_x, exp_funcs, use_hint=False, k=0.0):
    """Approximates a preimage app_x of exp_x_noisy.

    Returns an array app_x, such that each row of exp_x_noisy is close
    to each row of exp_funcs(app_x).
    use_hint: determines the starting point for the approximation of the
    preimage. There are three possibilities.
    if it equals False: starting point is generated with a normal distribution
    if it equals True: starting point is the first dim_x elements of exp_x_noisy
    otherwise: use the parameter use_hint itself as the first approximation
    k: weighting factor in [0, 1] to balance between approximation error and
    closeness to the starting point. For instance:
    objective function is to minimize:
    (1-k) * |exp_funcs(app_x) - exp_x_noisy|/output_dim +
    k * |app_x - starting point|/input_dim
    Note: this function requires scipy.
    """
    if numx_description != 'scipy':
        raise NotImplementedError('This function requires scipy.')
    else:
        import scipy.optimize
        num_samples = exp_x_noisy.shape[0]
        # array hints are checked first, so the equality test below only
        # ever compares plain values against True
        if isinstance(use_hint, numx.ndarray):
            app_x = use_hint.copy()
        elif use_hint == True:
            app_x = exp_x_noisy[:,0:dim_x].copy()
        else:
            app_x = numx.random.normal(size=(num_samples,dim_x))
        # refine each sample's preimage independently by least squares;
        # 'residuals' (above) defines the weighted objective
        for row in range(num_samples):
            plsq = scipy.optimize.leastsq(residuals, app_x[row],
                                          args=(exp_x_noisy[row], exp_funcs,
                                                app_x[row], k), maxfev=50*dim_x)
            app_x[row] = plsq[0]
        # also return the expansion of the recovered preimage
        app_exp_x = numx.concatenate([func(app_x) for func in exp_funcs],axis=1)
        return app_x, app_exp_x
| |
# -*- coding: utf-8 -*-
import datetime
import math
from flask import current_app
from models import (
Achievement,
Coupon,
GlobalStatistics,
Level,
LevelInstanceUser,
LevelStatistics,
Organization,
OrganizationAchievement,
OrganizationCoupon,
OrganizationLevel,
OrganizationLevelValidation,
OrganizationStatistics,
Session,
Task,
User,
UserToken,
)
from utils import mongo_list_to_dict
class Job(object):
    """Base class for background tasks tracked in the Task collection.

    Subclasses must define a ``name`` class attribute and a ``run()``
    method; ``call()`` runs the job and records its status transitions.
    """

    def __init__(self, task_id):
        # _id of the Task document used to report progress
        self.task_id = task_id

    def update(self, data):
        """Apply a raw mongo update document to the underlying task."""
        return Task.update_by_id(self.task_id, data)

    def update_status(self, status):
        """Set the task document's 'status' field."""
        self.update({
            '$set': {
                'status': status,
            },
        })

    def call(self):
        """Run the job, recording 'in progress' / 'succeed' / 'failed'.

        On failure the status is set to 'failed' and the exception is
        re-raised so the caller still observes it.  (Resolves the old
        FIXME about wrapping run() in try/except.)
        """
        self.update_status('in progress')
        try:
            self.run()
        except Exception:
            # record the failure before propagating it
            self.update_status('failed')
            raise
        self.update_status('succeed')
class TestJob(Job):
    """No-op job used to check that the job runner is wired up correctly."""
    name = 'test'

    def run(self):
        # Logger.warn is a deprecated alias; 'warning' is the supported
        # spelling with identical behavior.
        current_app.logger.warning('test job called')
class CleanJob(Job):
    """Purge expired authentication artifacts from the database."""
    name = 'clean'

    def run(self):
        cutoff = datetime.datetime.utcnow()
        # Flush tasks collection
        # current_app.data.driver.db['raw-tasks'].drop()
        # Clean expired api tokens
        UserToken.remove({'expiry_date': {'$lt': cutoff}})
        # Clean expired level access tokens
        LevelInstanceUser.remove({'expiry_date': {'$lt': cutoff}})
class UpdateStatsJob(Job):
    """Recompute global, per-level, per-organization statistics documents."""
    name = 'update-stats'
    def run(self):
        # global statistics
        gs = {}
        gs['users'] = User.count({'active': True})
        # FIXME: remove beta levels
        gs['achievements'] = Achievement.count()
        gs['expired_coupons'] = Coupon.count({
            'validations_left': 0,
        })
        gs['coupons'] = Coupon.count()
        gs['level_bought'] = OrganizationLevel.count()
        gs['level_finished'] = OrganizationLevel.count({
            'status': {
                '$in': ['pending validation', 'validated'],
            },
        })
        gs['levels'] = Level.count()
        gs['organization_achievements'] = OrganizationAchievement.count()
        gs['organization_coupons'] = OrganizationCoupon.count()
        gs['organizations'] = Organization.count()
        last_record = GlobalStatistics.last_record()
        # only write a new record when at least one counter changed
        if not last_record or not all(item in last_record.items()
                                      for item in gs.items()):
            GlobalStatistics.post_internal(gs)
        # level statistics
        levels = mongo_list_to_dict(
            Level.all()
        )
        for level in levels.values():
            amount_bought = OrganizationLevel.count({
                'level': level['_id'],
            })
            # FIXME: filter, only 1 by organization
            amount_finished = OrganizationLevel.count({
                'level': level['_id'],
                'status': {
                    '$in': ['pending validation', 'validated'],
                },
            })
            level['amount_bought'] = amount_bought
            LevelStatistics.update_by_id(level['statistics'], {
                '$set': {
                    'amount_bought': amount_bought,
                    'amount_finished': amount_finished,
                    # FIXME: fivestar average
                    # FIXME: duration average
                    # FIXME: amount hints bought
                },
            })
        sessions = Session.all()
        for session in sessions:
            # NOTE(review): organization_levels is computed but never used
            # below -- presumably dead code or a leftover; confirm.
            organization_levels = OrganizationLevel.find({
                'session': session['_id'],
            })
            organizations = mongo_list_to_dict(
                Organization.find({
                    'session': session['_id'],
                })
            )
            validations = OrganizationLevelValidation.all()
            for validation in validations:
                if validation['status'] == 'refused':
                    continue
                # skip validations whose organization is not in this session
                organization = organizations.get(validation['organization'])
                if not organization:
                    continue
                level = levels.get(validation['level'])
                if not level:
                    continue
                # per-level validation counter, initialized lazily.
                # NOTE(review): 'levels' is shared across the sessions loop,
                # so this counter is NOT reset per session -- confirm intended.
                level['validations'] = level.get('validations', 0)
                defaults = {
                    'validated_levels': [],
                    'gold_medals': 0,
                    'silver_medals': 0,
                    'bronze_medals': 0,
                }
                for key, value in defaults.items():
                    if key not in organization:
                        organization[key] = value
                # medal for being the 1st/2nd/3rd organization to validate
                if level['validations'] == 0:
                    organization['gold_medals'] += 1
                elif level['validations'] == 1:
                    organization['silver_medals'] += 1
                elif level['validations'] == 2:
                    organization['bronze_medals'] += 1
                organization['validated_levels'].append(level['_id'])
                level['validations'] += 1
            for organization in organizations.values():
                coupons = OrganizationCoupon.count({
                    'organization': organization['_id'],
                })
                achievements = OrganizationAchievement.count({
                    'organization': organization['_id'],
                })
                # deduplicate: an organization may validate a level twice
                validated_levels = list(set(
                    organization.get('validated_levels', [])
                ))
                # score = weighted medals + per-level base + achievements
                # + a rarity bonus per validated level (below)
                score = 0
                score += organization.get('gold_medals', 0) * 9
                score += organization.get('silver_medals', 0) * 5
                score += organization.get('bronze_medals', 0) * 2
                score += len(validated_levels) * 10
                score += achievements * 2
                for validated_level in validated_levels:
                    level = levels[validated_level]
                    # rarity bonus: the smaller the fraction of buyers who
                    # finished the level, the larger -log(percent)
                    percent = float(level['validations']) / float(level['amount_bought'])
                    score += int(round(-math.log(percent) * 2))
                OrganizationStatistics.update_by_id(organization['statistics'], {
                    '$set': {
                        'coupons': coupons,
                        'score': score,
                        'achievements': achievements,
                        'gold_medals': organization.get('gold_medals', 0),
                        'silver_medals': organization.get('silver_medals', 0),
                        'bronze_medals': organization.get('bronze_medals', 0),
                        'finished_levels': len(validated_levels),
                        'bought_levels': OrganizationLevel.count({
                            'organization': organization['_id'],
                        })
                    },
                })
# All job classes exposed to the scheduler; each must define a unique
# `name` class attribute (used as the lookup key in setup_jobs below).
JOBS_CLASSES = (
    CleanJob,
    TestJob,
    UpdateStatsJob,
)
def setup_jobs(app):
    """Attach the registry of known jobs to the Flask app, keyed by name."""
    app.jobs = {job.name: job for job in JOBS_CLASSES}
| |
"""
Key bindings which are also known by GNU Readline by the given names.
See: http://www.delorie.com/gnu/docs/readline/rlman_13.html
"""
from __future__ import unicode_literals
from six.moves import range
import six
from .completion import generate_completions, display_completions_like_readline
from prompt_toolkit.document import Document
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.key_processor import KeyPress
from prompt_toolkit.key_binding.key_bindings import key_binding
from prompt_toolkit.keys import Keys
from prompt_toolkit.search import SearchDirection
from prompt_toolkit.selection import PasteMode
__all__ = [
'get_by_name',
]
# Registry that maps the Readline command names to their handlers.
_readline_commands = {}
def register(name):
    """
    Return a decorator that stores its handler in `_readline_commands`
    under the Readline command `name`.
    """
    assert isinstance(name, six.text_type)
    def store(handler):
        # `handler` is a callable or _Binding.
        _readline_commands[name] = handler
        return handler
    return store
def get_by_name(name):
    """
    Return the handler for the (Readline) command with the given name.
    """
    if name not in _readline_commands:
        raise KeyError('Unknown Readline command: %r' % name)
    return _readline_commands[name]
#
# Commands for moving
# See: http://www.delorie.com/gnu/docs/readline/rlman_14.html
#
@register('beginning-of-line')
def beginning_of_line(event):
    " Move to the start of the current line. "
    buff = event.current_buffer
    # get_start_of_line_position returns a relative (negative) offset
    buff.cursor_position += buff.document.get_start_of_line_position(after_whitespace=False)
@register('end-of-line')
def end_of_line(event):
    " Move to the end of the line. "
    buff = event.current_buffer
    buff.cursor_position += buff.document.get_end_of_line_position()
@register('forward-char')
def forward_char(event):
    " Move forward a character. "
    buff = event.current_buffer
    # event.arg is the numeric (repeat) argument
    buff.cursor_position += buff.document.get_cursor_right_position(count=event.arg)
@register('backward-char')
def backward_char(event):
    " Move back a character. "
    buff = event.current_buffer
    buff.cursor_position += buff.document.get_cursor_left_position(count=event.arg)
@register('forward-word')
def forward_word(event):
    """
    Move forward to the end of the next word. Words are composed of letters and
    digits.
    """
    buff = event.current_buffer
    pos = buff.document.find_next_word_ending(count=event.arg)
    # only move when a next word was actually found (pos is truthy)
    if pos:
        buff.cursor_position += pos
@register('backward-word')
def backward_word(event):
    """
    Move back to the start of the current or previous word. Words are composed
    of letters and digits.
    """
    buff = event.current_buffer
    pos = buff.document.find_previous_word_beginning(count=event.arg)
    if pos:
        buff.cursor_position += pos
@register('clear-screen')
def clear_screen(event):
    """
    Clear the screen and redraw everything at the top of the screen.
    """
    event.app.renderer.clear()
@register('redraw-current-line')
def redraw_current_line(event):
    """
    Refresh the current line.
    (Readline defines this command, but prompt-toolkit doesn't have it.)
    """
    pass
#
# Commands for manipulating the history.
# See: http://www.delorie.com/gnu/docs/readline/rlman_15.html
#
@register('accept-line')
def accept_line(event):
    " Accept the line regardless of where the cursor is. "
    event.current_buffer.validate_and_handle()
@register('previous-history')
def previous_history(event):
    " Move `back` through the history list, fetching the previous command. "
    event.current_buffer.history_backward(count=event.arg)
@register('next-history')
def next_history(event):
    " Move `forward` through the history list, fetching the next command. "
    event.current_buffer.history_forward(count=event.arg)
@register('beginning-of-history')
def beginning_of_history(event):
    " Move to the first line in the history. "
    event.current_buffer.go_to_history(0)
@register('end-of-history')
def end_of_history(event):
    """
    Move to the end of the input history, i.e., the line currently being entered.
    """
    # jump forward by an effectively infinite amount, then select the very
    # last working line explicitly
    event.current_buffer.history_forward(count=10**100)
    buff = event.current_buffer
    buff.go_to_history(len(buff._working_lines) - 1)
@register('reverse-search-history')
def reverse_search_history(event):
    """
    Search backward starting at the current line and moving `up` through
    the history as necessary. This is an incremental search.
    """
    control = event.app.layout.current_control
    # only start searching when the focused control has a search buffer
    # control to hand focus to
    if control.search_buffer_control:
        event.app.current_search_state.direction = SearchDirection.BACKWARD
        event.app.layout.current_control = control.search_buffer_control
#
# Commands for changing text
#
@register('end-of-file')
def end_of_file(event):
    """
    Exit.
    """
    event.app.exit()
@register('delete-char')
def delete_char(event):
    " Delete the character after the cursor. "
    # (buffer.delete removes text *after* the cursor -- compare
    # backward-delete-char below, which uses delete_before_cursor)
    deleted = event.current_buffer.delete(count=event.arg)
    if not deleted:
        event.app.output.bell()
@register('backward-delete-char')
def backward_delete_char(event):
    " Delete the character behind the cursor. "
    if event.arg < 0:
        # When a negative argument has been given, this should delete in front
        # of the cursor.
        deleted = event.current_buffer.delete(count=-event.arg)
    else:
        deleted = event.current_buffer.delete_before_cursor(count=event.arg)
    if not deleted:
        # nothing was deleted (buffer edge): audible feedback
        event.app.output.bell()
@register('self-insert')
def self_insert(event):
    " Insert yourself. "
    # event.data is the typed text; repeat it event.arg times
    event.current_buffer.insert_text(event.data * event.arg)
@register('transpose-chars')
def transpose_chars(event):
    """
    Emulate Emacs transpose-char behavior: at the beginning of the buffer,
    do nothing. At the end of a line or buffer, swap the characters before
    the cursor. Otherwise, move the cursor right, and then swap the
    characters before the cursor.
    """
    b = event.current_buffer
    p = b.cursor_position
    if p == 0:
        return
    elif p == len(b.text) or b.text[p] == '\n':
        b.swap_characters_before_cursor()
    else:
        b.cursor_position += b.document.get_cursor_right_position()
        b.swap_characters_before_cursor()
@register('uppercase-word')
def uppercase_word(event):
    """
    Uppercase the current (or following) word.
    """
    buff = event.current_buffer
    for i in range(event.arg):
        pos = buff.document.find_next_word_ending()
        words = buff.document.text_after_cursor[:pos]
        # overwrite=True replaces the original word in place
        buff.insert_text(words.upper(), overwrite=True)
@register('downcase-word')
def downcase_word(event):
    """
    Lowercase the current (or following) word.
    """
    buff = event.current_buffer
    for i in range(event.arg):  # XXX: not DRY: see meta_c and meta_u!!
        pos = buff.document.find_next_word_ending()
        words = buff.document.text_after_cursor[:pos]
        buff.insert_text(words.lower(), overwrite=True)
@register('capitalize-word')
def capitalize_word(event):
    """
    Capitalize the current (or following) word.
    """
    buff = event.current_buffer
    for i in range(event.arg):
        pos = buff.document.find_next_word_ending()
        words = buff.document.text_after_cursor[:pos]
        # str.title() capitalizes the first letter of each word in the span
        buff.insert_text(words.title(), overwrite=True)
@register('quoted-insert')
def quoted_insert(event):
    """
    Add the next character typed to the line verbatim. This is how to insert
    key sequences like C-q, for example.
    """
    event.app.quoted_insert = True
#
# Killing and yanking.
#
@register('kill-line')
def kill_line(event):
    """
    Kill the text from the cursor to the end of the line.
    If we are at the end of the line, this should remove the newline.
    (That way, it is possible to delete multiple lines by executing this
    command multiple times.)
    """
    buff = event.current_buffer
    if event.arg < 0:
        # negative argument: kill backward to the start of the line instead
        deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position())
    else:
        if buff.document.current_char == '\n':
            deleted = buff.delete(1)
        else:
            deleted = buff.delete(count=buff.document.get_end_of_line_position())
    # killed text goes to the clipboard (kill ring), as in readline
    event.app.clipboard.set_text(deleted)
@register('kill-word')
def kill_word(event):
    """
    Kill from point to the end of the current word, or if between words, to the
    end of the next word. Word boundaries are the same as forward-word.
    """
    buff = event.current_buffer
    pos = buff.document.find_next_word_ending(count=event.arg)
    if pos:
        deleted = buff.delete(count=pos)
        if event.is_repeat:
            # consecutive kills accumulate into a single clipboard entry
            deleted = event.app.clipboard.get_data().text + deleted
        event.app.clipboard.set_text(deleted)
@register('unix-word-rubout')
def unix_word_rubout(event, WORD=True):
    """
    Kill the word behind point, using whitespace as a word boundary.
    Usually bound to ControlW.
    """
    buff = event.current_buffer
    pos = buff.document.find_start_of_previous_word(count=event.arg, WORD=WORD)
    if pos is None:
        # Nothing found? delete until the start of the document. (The
        # input starts with whitespace and no words were found before the
        # cursor.)
        pos = - buff.cursor_position
    if pos:
        deleted = buff.delete_before_cursor(count=-pos)
        # If the previous key press was also Control-W, concatenate deleted
        # text.
        if event.is_repeat:
            deleted += event.app.clipboard.get_data().text
        event.app.clipboard.set_text(deleted)
    else:
        # Nothing to delete. Bell.
        event.app.output.bell()
@register('backward-kill-word')
def backward_kill_word(event):
    """
    Kills the word before point, using "not a letter nor a digit" as a word boundary.
    Usually bound to M-Del or M-Backspace.
    """
    # same as unix-word-rubout, but with the narrower word definition
    unix_word_rubout(event, WORD=False)
@register('delete-horizontal-space')
def delete_horizontal_space(event):
    " Delete all spaces and tabs around point. "
    buff = event.current_buffer
    text_before_cursor = buff.document.text_before_cursor
    text_after_cursor = buff.document.text_after_cursor
    # count trailing whitespace before the cursor and leading after it
    delete_before = len(text_before_cursor) - len(text_before_cursor.rstrip('\t '))
    delete_after = len(text_after_cursor) - len(text_after_cursor.lstrip('\t '))
    buff.delete_before_cursor(count=delete_before)
    buff.delete(count=delete_after)
@register('unix-line-discard')
def unix_line_discard(event):
    """
    Kill backward from the cursor to the beginning of the current line.
    """
    buff = event.current_buffer
    if buff.document.cursor_position_col == 0 and buff.document.cursor_position > 0:
        # already at column 0: delete the newline joining this line to the
        # previous one (nothing goes to the clipboard in this case)
        buff.delete_before_cursor(count=1)
    else:
        deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position())
        event.app.clipboard.set_text(deleted)
@register('yank')
def yank(event):
    """
    Paste before cursor.
    """
    event.current_buffer.paste_clipboard_data(
        event.app.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.EMACS)
@register('yank-nth-arg')
def yank_nth_arg(event):
    """
    Insert the first argument of the previous command. With an argument, insert
    the nth word from the previous command (start counting at 0).
    """
    # pass None when no explicit numeric argument was typed
    n = (event.arg if event.arg_present else None)
    event.current_buffer.yank_nth_arg(n)
@register('yank-last-arg')
def yank_last_arg(event):
    """
    Like `yank_nth_arg`, but if no argument has been given, yank the last word
    of each line.
    """
    n = (event.arg if event.arg_present else None)
    event.current_buffer.yank_last_arg(n)
@register('yank-pop')
def yank_pop(event):
    """
    Rotate the kill ring, and yank the new top. Only works following yank or
    yank-pop.
    """
    buff = event.current_buffer
    doc_before_paste = buff.document_before_paste
    clipboard = event.app.clipboard
    # only valid right after a paste: restore the pre-paste document,
    # rotate the kill ring, and paste the new top entry
    if doc_before_paste is not None:
        buff.document = doc_before_paste
        clipboard.rotate()
        buff.paste_clipboard_data(
            clipboard.get_data(), paste_mode=PasteMode.EMACS)
#
# Completion.
#
@register('complete')
def complete(event):
    " Attempt to perform completion. "
    # readline-style: display the list of completions
    display_completions_like_readline(event)
@register('menu-complete')
def menu_complete(event):
    """
    Generate completions, or go to the next completion. (This is the default
    way of completing input in prompt_toolkit.)
    """
    generate_completions(event)
@register('menu-complete-backward')
def menu_complete_backward(event):
    " Move backward through the list of possible completions. "
    event.current_buffer.complete_previous()
#
# Keyboard macros.
#
@register('start-kbd-macro')
def start_kbd_macro(event):
    """
    Begin saving the characters typed into the current keyboard macro.
    """
    event.app.emacs_state.start_macro()
@register('end-kbd-macro')
def end_kbd_macro(event):
    """
    Stop saving the characters typed into the current keyboard macro and save
    the definition.
    """
    event.app.emacs_state.end_macro()
@register('call-last-kbd-macro')
@key_binding(record_in_macro=False)
def call_last_kbd_macro(event):
    """
    Re-execute the last keyboard macro defined, by making the characters in the
    macro appear as if typed at the keyboard.

    Notice that we pass `record_in_macro=False`. This ensures that the 'c-x e'
    key sequence doesn't appear in the recording itself. This function inserts
    the body of the called macro back into the KeyProcessor, so these keys will
    be added later on to the macro if their handlers have `record_in_macro=True`.
    """
    # Insert the macro.
    # (first=True: prepend at the current position in the input queue.)
    event.app.key_processor.feed_multiple(
        event.app.emacs_state.macro, first=True)
@register('print-last-kbd-macro')
def print_last_kbd_macro(event):
    " Print the last keyboard macro. "
    # TODO: Make the format suitable for the inputrc file.
    def print_macro():
        for k in event.app.key_processor.macro:
            print(k)
    # NOTE(review): imported locally -- presumably to avoid an import cycle
    # at module load time; confirm.
    from prompt_toolkit.application.run_in_terminal import run_in_terminal
    run_in_terminal(print_macro)
#
# Miscellaneous Commands.
#
@register('undo')
def undo(event):
    " Incremental undo. "
    event.current_buffer.undo()
@register('insert-comment')
def insert_comment(event):
    """
    Without numeric argument, comment all lines.
    With numeric argument, uncomment all lines.
    In any case accept the input.
    """
    buff = event.current_buffer
    # Transform all lines.
    if event.arg != 1:
        # numeric argument given: strip one leading '#' from commented lines
        def change(line):
            return line[1:] if line.startswith('#') else line
    else:
        # default: prefix every line with '#'
        def change(line):
            return '#' + line
    buff.document = Document(
        text='\n'.join(map(change, buff.text.splitlines())),
        cursor_position=0)
    # Accept input.
    buff.validate_and_handle()
@register('vi-editing-mode')
def vi_editing_mode(event):
    " Switch to Vi editing mode. "
    event.app.editing_mode = EditingMode.VI
@register('emacs-editing-mode')
def emacs_editing_mode(event):
    " Switch to Emacs editing mode. "
    event.app.editing_mode = EditingMode.EMACS
@register('prefix-meta')
def prefix_meta(event):
    """
    Metafy the next character typed. This is for keyboards without a meta key.

    Sometimes people also want to bind other keys to Meta, e.g. 'jj'::

        key_bindings.add_key_binding('j', 'j', filter=ViInsertMode())(prefix_meta)
    """
    # ('first' should be true, because we want to insert it at the current
    # position in the queue.)
    event.app.key_processor.feed(KeyPress(Keys.Escape), first=True)
@register('operate-and-get-next')
def operate_and_get_next(event):
    """
    Accept the current line for execution and fetch the next line relative to
    the current line from the history for editing.
    """
    buff = event.current_buffer
    new_index = buff.working_index + 1
    # Accept the current input. (This will also redraw the interface in the
    # 'done' state.)
    buff.validate_and_handle()
    # Set the new index at the start of the next run.
    def set_working_index():
        # guard: the next history entry may not exist
        if new_index < len(buff._working_lines):
            buff.working_index = new_index
    event.app.pre_run_callables.append(set_working_index)
@register('edit-and-execute-command')
def edit_and_execute(event):
    """
    Invoke an editor on the current command line, and accept the result.
    """
    buff = event.current_buffer
    buff.open_in_editor(validate_and_handle=True)
| |
"""The tests for the Unifi WAP device tracker platform."""
from unittest import mock
from datetime import datetime, timedelta
import pytest
import voluptuous as vol
import homeassistant.util.dt as dt_util
from homeassistant.components.device_tracker import DOMAIN
import homeassistant.components.unifi.device_tracker as unifi
from homeassistant.const import (CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
CONF_PLATFORM, CONF_VERIFY_SSL,
CONF_MONITORED_CONDITIONS)
from tests.common import mock_coro
from asynctest import CoroutineMock
from aiounifi.clients import Clients
DEFAULT_DETECTION_TIME = timedelta(seconds=300)
@pytest.fixture
def mock_ctrl():
    """Mock pyunifi."""
    with mock.patch('aiounifi.Controller') as mock_control:
        instance = mock_control.return_value
        instance.login.return_value = mock_coro()
        instance.initialize.return_value = mock_coro()
        yield mock_control
@pytest.fixture
def mock_scanner():
    """Mock UnifyScanner."""
    target = ('homeassistant.components.unifi.device_tracker'
              '.UnifiScanner')
    with mock.patch(target) as scanner:
        yield scanner
@mock.patch('os.access', return_value=True)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
async def test_config_valid_verify_ssl(hass, mock_scanner, mock_ctrl):
    """Test the setup with a string for ssl_verify.

    Representing the absolute path to a CA certificate bundle.
    """
    # os.access / os.path.isfile are patched above so the schema accepts
    # the certificate path without touching the real filesystem
    config = {
        DOMAIN: unifi.PLATFORM_SCHEMA({
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
            CONF_VERIFY_SSL: "/tmp/unifi.crt"
        })
    }
    result = await unifi.async_get_scanner(hass, config)
    assert mock_scanner.return_value == result
    assert mock_ctrl.call_count == 1
    assert mock_scanner.call_count == 1
    # NOTE(review): the two trailing None args mirror the call in
    # test_config_full below (where the last one is monitored_conditions);
    # confirm against the UnifiScanner signature.
    assert mock_scanner.call_args == mock.call(mock_ctrl.return_value,
                                               DEFAULT_DETECTION_TIME,
                                               None, None)
async def test_config_minimal(hass, mock_scanner, mock_ctrl):
    """Test the setup with minimal configuration."""
    config = {
        DOMAIN: unifi.PLATFORM_SCHEMA({
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
        })
    }
    result = await unifi.async_get_scanner(hass, config)
    assert mock_scanner.return_value == result
    assert mock_ctrl.call_count == 1
    assert mock_scanner.call_count == 1
    assert mock_scanner.call_args == mock.call(mock_ctrl.return_value,
                                               DEFAULT_DETECTION_TIME,
                                               None, None)
async def test_config_full(hass, mock_scanner, mock_ctrl):
    """Test the setup with full configuration."""
    config = {
        DOMAIN: unifi.PLATFORM_SCHEMA({
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
            CONF_HOST: 'myhost',
            CONF_VERIFY_SSL: False,
            CONF_MONITORED_CONDITIONS: ['essid', 'signal'],
            'port': 123,
            'site_id': 'abcdef01',
            'detection_time': 300,
        })
    }
    result = await unifi.async_get_scanner(hass, config)
    assert mock_scanner.return_value == result
    assert mock_ctrl.call_count == 1
    assert mock_scanner.call_count == 1
    # detection_time of 300 seconds equals DEFAULT_DETECTION_TIME
    assert mock_scanner.call_args == mock.call(
        mock_ctrl.return_value,
        DEFAULT_DETECTION_TIME,
        None,
        config[DOMAIN][CONF_MONITORED_CONDITIONS])
def test_config_error():
    """Each invalid configuration is rejected by the platform schema."""
    bad_configs = [
        {
            # no username
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_HOST: 'myhost',
            'port': 123,
        },
        {
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
            CONF_HOST: 'myhost',
            'port': 'foo',  # bad port!
        },
        {
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
            CONF_VERIFY_SSL: "dfdsfsdfsd",  # Invalid ssl_verify (no file)
        },
    ]
    for bad_conf in bad_configs:
        with pytest.raises(vol.Invalid):
            unifi.PLATFORM_SCHEMA(bad_conf)
async def test_config_controller_failed(hass, mock_ctrl, mock_scanner):
    """A controller that cannot connect makes setup return False."""
    mock_ctrl.side_effect = unifi.CannotConnect
    conf = {
        'device_tracker': {
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
        }
    }
    assert await unifi.async_get_scanner(hass, conf) is False
async def test_scanner_update():
    """Updating the scanner pulls the client list from the controller."""
    controller = mock.MagicMock()
    clients = [
        {'mac': '123', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
        {'mac': '234', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
    ]
    controller.clients = Clients([], CoroutineMock(return_value=clients))

    scanner = unifi.UnifiScanner(controller, DEFAULT_DETECTION_TIME,
                                 None, None)
    await scanner.async_update()
    assert len(scanner._clients) == 2
def test_scanner_update_error():
    """Constructing the scanner survives a controller raising on update."""
    controller = mock.MagicMock()
    controller.get_clients.side_effect = unifi.aiounifi.AiounifiException
    unifi.UnifiScanner(controller, DEFAULT_DETECTION_TIME, None, None)
async def test_scan_devices():
    """Scanning reports the MAC addresses of recently seen clients."""
    controller = mock.MagicMock()
    clients = [
        {'mac': '123', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
        {'mac': '234', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
    ]
    controller.clients = Clients([], CoroutineMock(return_value=clients))

    scanner = unifi.UnifiScanner(controller, DEFAULT_DETECTION_TIME,
                                 None, None)
    await scanner.async_update()
    assert set(await scanner.async_scan_devices()) == {'123', '234'}
async def test_scan_devices_filtered():
    """Only clients on a whitelisted SSID are reported by a scan."""
    controller = mock.MagicMock()
    clients = [
        {'mac': mac, 'essid': essid,
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())}
        for mac, essid in [('123', 'foonet'), ('234', 'foonet'),
                           ('567', 'notnet'), ('890', 'barnet')]
    ]
    controller.clients = Clients([], CoroutineMock(return_value=clients))

    scanner = unifi.UnifiScanner(controller, DEFAULT_DETECTION_TIME,
                                 ['foonet', 'barnet'], None)
    await scanner.async_update()
    assert set(await scanner.async_scan_devices()) == {'123', '234', '890'}
async def test_get_device_name():
    """Device names come from 'hostname' or 'name'; otherwise None."""
    controller = mock.MagicMock()
    clients = [
        {'mac': '123', 'hostname': 'foobar', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
        {'mac': '234', 'name': 'Nice Name', 'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
        {'mac': '456', 'essid': 'barnet', 'last_seen': '1504786810'},
    ]
    controller.clients = Clients([], CoroutineMock(return_value=clients))

    scanner = unifi.UnifiScanner(controller, DEFAULT_DETECTION_TIME,
                                 None, None)
    await scanner.async_update()

    expected_names = {'123': 'foobar', '234': 'Nice Name',
                      '456': None, 'unknown': None}
    for mac, name in expected_names.items():
        assert scanner.get_device_name(mac) == name
async def test_monitored_conditions():
    """Only whitelisted attributes appear in the extra attributes."""
    controller = mock.MagicMock()
    clients = [
        {'mac': '123',
         'hostname': 'foobar',
         'essid': 'barnet',
         'signal': -60,
         'last_seen': dt_util.as_timestamp(dt_util.utcnow()),
         'latest_assoc_time': 946684800.0},
        {'mac': '234',
         'name': 'Nice Name',
         'essid': 'barnet',
         'signal': -42,
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
        {'mac': '456',
         'hostname': 'wired',
         'essid': 'barnet',
         'last_seen': dt_util.as_timestamp(dt_util.utcnow())},
    ]
    controller.clients = Clients([], CoroutineMock(return_value=clients))

    scanner = unifi.UnifiScanner(
        controller, DEFAULT_DETECTION_TIME, None,
        ['essid', 'signal', 'latest_assoc_time'])
    await scanner.async_update()

    assert scanner.get_extra_attributes('123') == {
        'essid': 'barnet',
        'signal': -60,
        'latest_assoc_time': datetime(2000, 1, 1, 0, 0, tzinfo=dt_util.UTC),
    }
    assert scanner.get_extra_attributes('234') == {'essid': 'barnet',
                                                   'signal': -42}
    assert scanner.get_extra_attributes('456') == {'essid': 'barnet'}
| |
from collections import defaultdict
from json import JSONEncoder
import logging
import hashlib
class Node(object):
    """A node in the regulation tree: text, a list of child Nodes, a
    hierarchical label, an optional title and a node type.

    Note: this module targets Python 2 (it relies on the ``unicode`` and
    ``cmp`` builtins).
    """

    APPENDIX = u'appendix'
    INTERP = u'interp'
    REGTEXT = u'regtext'
    SUBPART = u'subpart'
    EMPTYPART = u'emptypart'

    INTERP_MARK = 'Interp'

    def __init__(self, text='', children=None, label=None, title=None,
                 node_type=REGTEXT, source_xml=None, tagged_text=''):
        # `children` and `label` default to None rather than `[]` to avoid
        # sharing one mutable default list across all instances; passing an
        # explicit list still works exactly as before (it is copied).
        self.text = unicode(text)
        # defensive copy
        self.children = list(children) if children is not None else []
        self.label = [unicode(l) for l in (label or []) if l != '']
        title = unicode(title or '')
        self.title = title or None
        self.node_type = node_type
        self.source_xml = source_xml
        self.marker = None
        self.tagged_text = tagged_text

    def __repr__(self):
        return (("Node( text = %s, children = %s, label = %s, title = %s, "
                 + "node_type = %s)") % (repr(self.text), repr(self.children),
                 repr(self.label), repr(self.title), repr(self.node_type)))

    def __cmp__(self, other):
        # Python 2 ordering hook: compare full repr strings.
        return cmp(repr(self), repr(other))

    def label_id(self):
        """Canonical string form of the label, e.g. '1005-2-a'."""
        return '-'.join(self.label)
class NodeEncoder(JSONEncoder):
    """Custom JSON encoder which knows how to serialize Node objects."""
    def default(self, obj):
        if not isinstance(obj, Node):
            return super(NodeEncoder, self).default(obj)
        fields = dict(obj.__dict__)
        # Omit optional fields that are unset.
        if obj.title is None:
            fields.pop('title')
        if obj.marker is None:
            fields.pop('marker')
        # Never serialize these internal/derived fields.
        for skipped in ('tagged_text', 'source_xml', 'child_labels'):
            fields.pop(skipped, None)
        return fields
def node_decode_hook(d):
    """JSON object hook: turn dicts with the right keys into Nodes."""
    required = set(['text', 'children', 'label', 'node_type'])
    if required.issubset(d.keys()):
        return Node(d['text'], d['children'], d['label'],
                    d.get('title', None), d['node_type'])
    return d
def walk(node, fn):
    """Apply fn to every node in the tree (pre-order) and collect the
    non-None results into a flat list."""
    outcome = fn(node)
    collected = [] if outcome is None else [outcome]
    for child in node.children:
        collected.extend(walk(child, fn))
    return collected
def find(root, label):
    """Search the tree for the node whose label_id matches `label`;
    returns None (and logs a warning) when absent."""
    matches = walk(root, lambda n: n if n.label_id() == label else None)
    if matches:
        return matches[0]
    logging.warning('Failed to locate node with label {}'.format(label))
    return None
def join_text(node):
    """Concatenate the text of this node and all descendants (pre-order)."""
    pieces = []
    walk(node, lambda n: pieces.append(n.text))
    return ''.join(pieces)
def merge_duplicates(nodes):
    """Given a list of nodes with the same-length label, merge duplicates by
    combining their children. Mirrors the original behavior of merging the
    *last* duplicate pair found, then recursing."""
    duplicate_pair = None
    for left_idx, left in enumerate(nodes):
        for right_idx, right in enumerate(nodes[left_idx + 1:], left_idx + 1):
            if left.label == right.label:
                duplicate_pair = (left_idx, right_idx)
    if duplicate_pair is None:
        return nodes
    left_idx, right_idx = duplicate_pair
    # Fold the right duplicate's children into the left one, drop the right.
    nodes[left_idx].children.extend(nodes[right_idx].children)
    return merge_duplicates(nodes[:right_idx] + nodes[right_idx + 1:])
def treeify(nodes):
    """Given a list of nodes, convert those nodes into the appropriate tree
    structure based on their labels. This assumes that all nodes will fall
    under a set of 'root' nodes, which have the min-length label."""
    if not nodes:
        return nodes
    # Single pass to find all nodes carrying the shortest label.
    min_len, with_min = len(nodes[0].label), []

    for node in nodes:
        if len(node.label) == min_len:
            with_min.append(node)
        elif len(node.label) < min_len:
            # Found an even shorter label: restart the candidate list.
            min_len = len(node.label)
            with_min = [node]
    with_min = merge_duplicates(with_min)

    roots = []
    for root in with_min:
        if root.label[-1] == Node.INTERP_MARK:
            # Interpretation roots own anything matching the label prefix
            # *before* the trailing Interp marker.
            is_child = lambda n: n.label[:len(root.label)-1] == root.label[:-1]
        else:
            is_child = lambda n: n.label[:len(root.label)] == root.label
        children = [n for n in nodes if n.label != root.label and is_child(n)]
        # Recurse on the descendants and attach them under this root.
        root.children = root.children + treeify(children)
        roots.append(root)
    return roots
class FrozenNode(object):
    """Immutable interface for nodes. No guarantees about internal state."""
    _pool = defaultdict(set)    # collection of all FrozenNodes, keyed by hash

    def __init__(self, text='', children=(), label=(), title='',
                 node_type=Node.REGTEXT, tagged_text=''):
        # Normalize falsy inputs and keep private copies; the public API is
        # the read-only properties below.
        self._text = text or ''
        self._children = tuple(children)
        self._label = tuple(label)
        self._title = title or ''
        self._node_type = node_type
        self._tagged_text = tagged_text or ''
        self._hash = self._generate_hash()
        # Register every instance so from_node() can deduplicate later.
        FrozenNode._pool[self.hash].add(self)

    @property
    def text(self):
        return self._text

    @property
    def children(self):
        return self._children

    @property
    def label(self):
        return self._label

    @property
    def title(self):
        return self._title

    @property
    def node_type(self):
        return self._node_type

    @property
    def tagged_text(self):
        return self._tagged_text

    @property
    def hash(self):
        # Hex digest string computed once in __init__ by _generate_hash().
        return self._hash

    def _generate_hash(self):
        """Called during instantiation. Digests all fields"""
        hasher = hashlib.sha256()
        hasher.update(self.text.encode('utf-8'))
        hasher.update(self.tagged_text.encode('utf-8'))
        hasher.update(self.title.encode('utf-8'))
        hasher.update(self.label_id.encode('utf-8'))
        # NOTE(review): node_type and child.hash are fed in without
        # .encode(); this assumes Python 2 byte strings — confirm before
        # porting to Python 3.
        hasher.update(self.node_type)
        for child in self.children:
            hasher.update(child.hash)
        return hasher.hexdigest()

    def __hash__(self):
        """As the hash property is already distinctive, re-use it"""
        return hash(self.hash)

    def __eq__(self, other):
        """We define equality as having the same fields except for children.
        Instead of recursively inspecting them, we compare only their hash
        (this is a Merkle tree)"""
        return (other.__class__ == self.__class__
                and self.hash == other.hash
                # Compare the fields to limit the effect of hash collisions
                and self.text == other.text
                and self.title == other.title
                and self.node_type == other.node_type
                and self.tagged_text == other.tagged_text
                and self.label_id == other.label_id
                and [c.hash for c in self.children] ==
                [c.hash for c in other.children])

    @staticmethod
    def from_node(node):
        """Convert a struct.Node (or similar) into a struct.FrozenNode. This
        also checks if this node has already been instantiated. If so, it
        returns the instantiated version (i.e. only one of each identical node
        exists in memory)"""
        children = map(FrozenNode.from_node, node.children)
        fresh = FrozenNode(text=node.text, children=children, label=node.label,
                           title=node.title or '', node_type=node.node_type,
                           tagged_text=getattr(node, 'tagged_text', '') or '')
        # __init__ added `fresh` to the pool, so this loop always finds at
        # least one equal element (possibly `fresh` itself).
        for el in FrozenNode._pool[fresh.hash]:
            if el == fresh:
                return el   # note we are _not_ returning fresh

    @property
    def label_id(self):
        """Convert label into a string"""
        # Lazily cached; harmless mutation since the value is derived.
        if not hasattr(self, '_label_id'):
            self._label_id = '-'.join(self.label)
        return self._label_id
| |
from unittest import TestCase
import simplejson as json
from qrl.core import config
from qrl.core.Indexer import Indexer
from qrl.core.State import State
from qrl.core.StateContainer import StateContainer
from qrl.core.misc import logger
from qrl.core.OptimizedAddressState import OptimizedAddressState
from qrl.core.MultiSigAddressState import MultiSigAddressState
from qrl.core.txs.multisig.MultiSigSpend import MultiSigSpend
from tests.core.txs.testdata import test_json_MultiSigSpend
from qrl.generated.qrl_pb2 import SlaveMetadata
from tests.misc.helper import get_alice_xmss, get_bob_xmss, set_qrl_dir, set_hard_fork_block_number
# Configure the default logger once at module import for the tests below.
logger.initialize_default()
class TestMultiSigSpend(TestCase):
    """Tests for the MultiSigSpend transaction: creation, serialization,
    custom/extended validation, apply/revert and affected addresses."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): fixtures are built in __init__ rather than setUp(),
        # so one State instance is shared by all tests of this instance and
        # never explicitly closed — confirm this is intentional.
        super(TestMultiSigSpend, self).__init__(*args, **kwargs)
        with set_qrl_dir('no_data'):
            self.state = State()
        self.alice = get_alice_xmss()
        self.bob = get_bob_xmss()
        self.random = get_alice_xmss(4)
        self.random_signer = get_bob_xmss(4)
        self.signatories = [self.alice.address, self.bob.address, self.random.address]
        self.weights = [20, 30, 10]
        self.threshold = 30

    def test_create(self):
        """MultiSigSpend.create() returns a MultiSigSpend instance."""
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=0,
                                  xmss_pk=self.alice.pk)
        self.assertIsInstance(tx, MultiSigSpend)

    def test_to_json(self):
        """Serialized transaction matches the stored JSON test vector."""
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=0,
                                  xmss_pk=self.alice.pk)
        txjson = tx.to_json()
        self.assertEqual(json.loads(test_json_MultiSigSpend), json.loads(txjson))

    def test_validate_custom(self):
        """
        MultiSigCreate _validate_custom() only checks if fee == 0
        """
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.alice.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=0,
                                  xmss_pk=self.random_signer.pk)
        # Mismatched addrs_to / amounts lengths must fail validation.
        del tx._data.multi_sig_spend.addrs_to[-1]
        result = tx._validate_custom()
        self.assertFalse(result)
        tx._data.multi_sig_spend.addrs_to.extend([self.alice.address])
        result = tx._validate_custom()
        self.assertTrue(result)
        del tx._data.multi_sig_spend.amounts[-1]
        result = tx._validate_custom()
        self.assertFalse(result)
        tx._data.multi_sig_spend.amounts.extend([100])
        result = tx._validate_custom()
        self.assertTrue(result)
        # A non-multi-sig source address must fail validation.
        tx._data.multi_sig_spend.multi_sig_address = self.bob.address
        result = tx._validate_custom()
        self.assertFalse(result)

    @set_hard_fork_block_number()
    def test_validate_extended(self):
        """
        TODO: Check by signing txn from a non signatory address
        """
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=5,
                                  xmss_pk=self.alice.pk)
        tx.sign(self.alice)
        alice_address_state = OptimizedAddressState.get_default(address=self.alice.address)
        alice_address_state.pbdata.balance = 5
        multi_sig_address_state = MultiSigAddressState.create(
            creation_tx_hash=b'',
            balance=100,
            signatories=[self.alice.address, self.bob.address],
            weights=[4, 6],
            threshold=5,
            transaction_hash_count=0)
        addresses_state = {
            self.alice.address: alice_address_state,
            multi_sig_address: multi_sig_address_state,
        }
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=10,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=None,
                                         batch=None)
        result = tx._validate_extended(state_container)
        self.assertTrue(result)
        # Signer cannot afford the fee.
        alice_address_state.pbdata.balance = 0
        result = tx._validate_extended(state_container)
        self.assertFalse(result)
        alice_address_state.pbdata.balance = 5
        result = tx._validate_extended(state_container)
        self.assertTrue(result)
        # Multi-sig vault cannot cover the spend amount.
        multi_sig_address_state.pbdata.balance = 99
        result = tx._validate_extended(state_container)
        self.assertFalse(result)
        multi_sig_address_state.pbdata.balance = 100
        result = tx._validate_extended(state_container)
        self.assertTrue(result)
        # Expiry at/before the current block number is invalid.
        tx.pbdata.multi_sig_spend.expiry_block_number = 10
        result = tx._validate_extended(state_container)
        self.assertFalse(result)
        tx.pbdata.multi_sig_spend.expiry_block_number = 15000
        result = tx._validate_extended(state_container)
        self.assertTrue(result)

    @set_hard_fork_block_number()
    def test_validate_all(self):
        """
        TODO: Check by signing txn from a non signatory address
        """
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        # Signed by a slave key (self.random) on behalf of alice.
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=5,
                                  xmss_pk=self.random.pk,
                                  master_addr=self.alice.address)
        tx.sign(self.random)
        tx._data.nonce = 1
        alice_address_state = OptimizedAddressState.get_default(address=self.alice.address)
        alice_address_state.pbdata.balance = 5
        random_address_state = OptimizedAddressState.get_default(address=self.random.address)
        multi_sig_address_state = MultiSigAddressState.create(
            creation_tx_hash=b'',
            balance=100,
            signatories=[self.alice.address, self.bob.address],
            weights=[4, 6],
            threshold=5,
            transaction_hash_count=0)
        addresses_state = {
            self.alice.address: alice_address_state,
            self.random.address: random_address_state,
            multi_sig_address: multi_sig_address_state,
        }
        # Register self.random as a slave of alice so the master_addr works.
        slaves = Indexer(b'slave', None)
        slaves.data[(self.alice.address, self.random.pk)] = SlaveMetadata(access_type=0)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=slaves,
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=10,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=False,
                                         my_db=self.state._db,
                                         batch=None)
        result = tx.validate_all(state_container)
        self.assertTrue(result)
        tx._data.nonce = 2
        result = tx.validate_all(state_container)
        self.assertFalse(result)  # False as nonce is invalid

    def test_apply(self):
        """Applying the tx marks the OTS key used and records vote stats."""
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        multi_sig_address_state = MultiSigAddressState.create(
            creation_tx_hash=b'',
            balance=100,
            signatories=[self.alice.address, self.bob.address],
            weights=[4, 6],
            threshold=5,
            transaction_hash_count=0)
        alice_address_state = OptimizedAddressState.get_default(self.alice.address)
        alice_address_state.pbdata.balance = 5
        bob_address_state = OptimizedAddressState.get_default(self.bob.address)
        addresses_state = {
            self.alice.address: alice_address_state,
            self.bob.address: bob_address_state,
            multi_sig_address: multi_sig_address_state,
        }
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=5,
                                  xmss_pk=self.alice.pk)
        tx.sign(self.alice)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                            tx.ots_key))
        tx.apply(self.state, state_container)
        self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                           tx.ots_key))
        # A fresh vote-stats record is created, keyed by the tx hash.
        self.assertIn(tx.txhash, state_container.votes_stats)
        vote_stats = state_container.votes_stats[tx.txhash]
        self.assertEqual(vote_stats.shared_key, tx.txhash)
        self.assertEqual(vote_stats.total_weight, 0)
        self.assertEqual(vote_stats.signatories, multi_sig_address_state.signatories)

    def test_revert(self):
        """Reverting an applied tx restores OTS bitfield and vote stats."""
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        multi_sig_address_state = MultiSigAddressState.create(
            creation_tx_hash=b'',
            balance=100,
            signatories=[self.alice.address, self.bob.address],
            weights=[4, 6],
            threshold=5,
            transaction_hash_count=0)
        alice_address_state = OptimizedAddressState.get_default(self.alice.address)
        alice_address_state.pbdata.balance = 5
        alice_address_state.update_ots_bitfield_used_page()
        alice_address_state.used_ots_key_count += 1
        alice_address_state.update_multi_sig_address_count()
        bob_address_state = OptimizedAddressState.get_default(self.bob.address)
        addresses_state = {
            self.alice.address: alice_address_state,
            self.bob.address: bob_address_state,
            multi_sig_address: multi_sig_address_state,
        }
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=5,
                                  xmss_pk=self.alice.pk)
        tx.sign(self.alice)
        tx._data.nonce = 1
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                            tx.ots_key))
        tx.apply(self.state, state_container)
        self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                           tx.ots_key))
        self.assertIn(tx.txhash, state_container.votes_stats)
        vote_stats = state_container.votes_stats[tx.txhash]
        self.assertEqual(vote_stats.shared_key, tx.txhash)
        self.assertEqual(vote_stats.total_weight, 0)
        self.assertEqual(vote_stats.signatories, multi_sig_address_state.signatories)
        # Revert must undo both the vote stats and the OTS key usage.
        tx.revert(self.state, state_container)
        self.assertNotIn(tx.txhash, state_container.votes_stats)
        self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                            tx.ots_key))

    def test_affected_address(self):
        """All three parties are reported: signer, vault and recipient."""
        affected_addresses = set()
        multi_sig_address = MultiSigAddressState.generate_multi_sig_address(b'')
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=[self.bob.address],
                                  amounts=[100],
                                  expiry_block_number=15000,
                                  fee=5,
                                  xmss_pk=self.alice.pk)
        tx.set_affected_address(affected_addresses)
        self.assertEqual(3, len(affected_addresses))
        self.assertIn(self.alice.address, affected_addresses)
        self.assertIn(multi_sig_address, affected_addresses)
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import cStringIO
import datetime
import logging
import os
import pipes
import platform
import Queue
import re
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
# Retry policy for CheckCallAndFilter(retry=True): attempt count and the
# initial sleep (doubled after each failure).
RETRY_MAX = 3
RETRY_INITIAL_SLEEP = 0.5

# Process start time; Elapsed() reports durations relative to this.
START = datetime.datetime.now()

# Warnings accumulated by AddWarning() and emitted by PrintWarnings().
_WARNINGS = []

# These repos are known to cause OOM errors on 32-bit platforms, due to the
# very large objects they contain. It is not safe to use threaded index-pack
# when cloning/fetching them.
THREADED_INDEX_PACK_BLACKLIST = [
    'https://chromium.googlesource.com/chromium/reference_builds/chrome_win.git'
]
class Error(Exception):
  """gclient exception class.

  Messages raised from an indexed worker thread get each line prefixed
  with 'NN> ' so interleaved output can be attributed to its worker.
  """
  def __init__(self, msg, *args, **kwargs):
    thread_index = getattr(threading.currentThread(), 'index', 0)
    if thread_index:
      prefixed = ['%d> %s' % (thread_index, line) for line in msg.splitlines()]
      msg = '\n'.join(prefixed)
    super(Error, self).__init__(msg, *args, **kwargs)
def Elapsed(until=None):
  """Returns wall-clock time since program START as 'H:MM:SS' (no fraction)."""
  end_time = datetime.datetime.now() if until is None else until
  delta = end_time - START
  # str(timedelta) looks like 'H:MM:SS.ffffff'; drop the fractional part.
  return str(delta).partition('.')[0]
def PrintWarnings():
  """Prints any accumulated warnings."""
  if _WARNINGS:
    # Python 2 print-chevron syntax: write the banner then each warning
    # to stderr so they stand out after normal output.
    print >> sys.stderr, '\n\nWarnings:'
    for warning in _WARNINGS:
      print >> sys.stderr, warning
def AddWarning(msg):
  """Adds the given warning message to the list of accumulated warnings."""
  # Collected warnings are dumped to stderr later by PrintWarnings().
  _WARNINGS.append(msg)
def SplitUrlRevision(url):
  """Splits url and returns a two-tuple: url, rev"""
  if url.startswith('ssh:'):
    # Make sure ssh://user-name@example.com/~/test.git@stable works
    regex = r'(ssh://(?:[-.\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
    # groups() yields (url, rev-or-None), already a 2-tuple.
    return tuple(re.search(regex, url).groups())
  parts = url.rsplit('@', 1)
  if re.match(r'^\w+\@', url) and '@' not in parts[0]:
    # 'user@host' style with no revision: the rsplit ate the user marker.
    parts = [url]
  if len(parts) == 1:
    parts += [None]
  return tuple(parts)
def IsGitSha(revision):
  """Returns true if the given string is a valid hex-encoded sha"""
  # Git accepts abbreviated SHAs; require between 6 and 40 hex digits.
  return bool(re.match('^[a-fA-F0-9]{6,40}$', revision))
def IsDateRevision(revision):
  """Returns true if the given revision is of the form "{ ... }"."""
  if not revision:
    return False
  return re.match(r'^\{.+\}$', str(revision)) is not None
def MakeDateRevision(date):
  """Returns a revision representing the latest revision before the given
  date, i.e. the date string wrapped in braces."""
  return '{' + date + '}'
def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with the human readable message.

  `e` is a SyntaxError-like object (lineno/offset/text attributes); if a
  readable message cannot be built, the original exception is re-raised.
  """
  try:
    # Try to construct a human readable error message
    if filename:
      error_message = 'There is a syntax error in %s\n' % filename
    else:
      error_message = 'There is a syntax error\n'
    error_message += 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except Exception:
    # Message construction failed (e.g. e.text is None); re-raise the
    # original exception rather than masking it.  This used to be a bare
    # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    raise e
  else:
    raise Error(error_message)
class PrintableObject(object):
  """Mixin providing a __str__ that dumps every non-dunder attribute."""
  def __str__(self):
    lines = []
    for attr in dir(self):
      if attr.startswith('__'):
        continue
      lines.append('%s = %s\n' % (attr, str(getattr(self, attr, ''))))
    return ''.join(lines)
def FileRead(filename, mode='rU'):
  """Reads a file and returns its content decoded as UTF-8 when possible.

  Falls back to the raw (Python 2 byte) string when decoding fails.
  """
  with open(filename, mode=mode) as f:
    # codecs.open() has different behavior than open() on python 2.6 so use
    # open() and decode manually.
    s = f.read()
    try:
      return s.decode('utf-8')
    except UnicodeDecodeError:
      return s
def FileWrite(filename, content, mode='w'):
  """Writes content to filename, encoded as UTF-8."""
  with codecs.open(filename, mode=mode, encoding='utf-8') as f:
    f.write(content)
def safe_rename(old, new):
  """Renames a file reliably.

  Sometimes os.rename does not work because a dying git process keeps a handle
  on it for a few seconds. An exception is then thrown, which make the program
  give up what it was doing and remove what was deleted.
  The only solution is to catch the exception and try again until it works.
  """
  max_attempts = 100  # roughly 10s at 0.1s per retry
  for attempt in range(max_attempts):
    try:
      os.rename(old, new)
      return
    except OSError:
      if attempt == max_attempts - 1:
        # Give up.
        raise
      # retry
      logging.debug("Renaming failed from %s to %s. Retrying ..." % (old, new))
      time.sleep(0.1)
def rmtree(path):
  """shutil.rmtree() on steroids.

  Recursively removes a directory, even if it's marked read-only.

  shutil.rmtree() doesn't work on Windows if any of the files or directories
  are read-only, which svn repositories and some .svn files are. We need to
  be able to force the files to be writable (i.e., deletable) as we traverse
  the tree.

  Even with all this, Windows still sometimes fails to delete a file, citing
  a permission error (maybe something to do with antivirus scans or disk
  indexing). The best suggestion any of the user forums had was to wait a
  bit and try again, so we do that too. It's hand-waving, but sometimes it
  works. :/

  On POSIX systems, things are a little bit simpler. The modes of the files
  to be deleted doesn't matter, only the modes of the directories containing
  them are significant. As the directory tree is traversed, each directory
  has its mode set appropriately before descending into it. This should
  result in the entire tree being removed, with the possible exception of
  *path itself, because nothing attempts to change the mode of its parent.
  Doing so would be hazardous, as it's not a directory slated for removal.
  In the ordinary case, this is not a problem: for our purposes, the user
  will never lack write permission on *path's parent.
  """
  if not os.path.exists(path):
    return

  if os.path.islink(path) or not os.path.isdir(path):
    raise Error('Called rmtree(%s) in non-directory' % path)

  if sys.platform == 'win32':
    # Give up and use cmd.exe's rd command.
    path = os.path.normcase(path)
    # Up to three attempts with a pause, to ride out transient locks
    # (antivirus, indexing).  Python 2 xrange / print-chevron syntax.
    for _ in xrange(3):
      exitcode = subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', path])
      if exitcode == 0:
        return
      else:
        print >> sys.stderr, 'rd exited with code %d' % exitcode
        time.sleep(3)
    raise Exception('Failed to remove path %s' % path)

  # On POSIX systems, we need the x-bit set on the directory to access it,
  # the r-bit to see its contents, and the w-bit to remove files from it.
  # The actual modes of the files within the directory is irrelevant.
  os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

  # Tiny indirection kept for symmetry with the calls below.
  def remove(func, subpath):
    func(subpath)

  for fn in os.listdir(path):
    # If fullpath is a symbolic link that points to a directory, isdir will
    # be True, but we don't want to descend into that as a directory, we just
    # want to remove the link. Check islink and treat links as ordinary files
    # would be treated regardless of what they reference.
    fullpath = os.path.join(path, fn)
    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
      remove(os.remove, fullpath)
    else:
      # Recurse.
      rmtree(fullpath)

  remove(os.rmdir, path)
def safe_makedirs(tree):
  """Creates the directory in a safe manner.

  Because multiple threads can create these directories concurently, trap the
  exception and pass on.
  """
  count = 0
  while not os.path.exists(tree):
    count += 1
    try:
      os.makedirs(tree)
    except OSError, e:  # Python 2 'except X, e' syntax.
      # 17 POSIX, 183 Windows
      if e.errno not in (17, 183):
        raise
      if count > 40:
        # Give up.
        raise
def CommandToStr(args):
  """Converts an arg list into a shell escaped string."""
  return ' '.join(map(pipes.quote, args))
def CheckCallAndFilterAndHeader(args, always=False, header=None, **kwargs):
  """Adds 'header' support to CheckCallAndFilter.

  If |always| is True, a message indicating what is being done
  is printed to stdout all the time even if not output is generated. Otherwise
  the message header is printed only if the call generated any ouput.
  """
  stdout = kwargs.setdefault('stdout', sys.stdout)
  if header is None:
    header = "\n________ running '%s' in '%s'\n" % (
                 ' '.join(args), kwargs.get('cwd', '.'))

  if always:
    stdout.write(header)
  else:
    # Defer printing the header until the first line of output arrives
    # (CheckCallAndFilter calls the filter with None for the first line
    # when call_filter_on_first_line is set).
    filter_fn = kwargs.get('filter_fn')
    def filter_msg(line):
      if line is None:
        stdout.write(header)
      elif filter_fn:
        filter_fn(line)
    kwargs['filter_fn'] = filter_msg
    kwargs['call_filter_on_first_line'] = True
  # Obviously.
  kwargs.setdefault('print_stdout', True)
  return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
  """Wraps an object, acting as a transparent proxy for all properties by
  default.
  """
  def __init__(self, wrapped):
    # Subclasses (AutoFlush, Annotated) rely on this exact attribute name.
    self._wrapped = wrapped

  def __getattr__(self, name):
    # Only invoked for attributes not found on the wrapper itself.
    return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
  """Creates a file object clone to automatically flush after N seconds."""
  def __init__(self, wrapped, delay):
    super(AutoFlush, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    self.__last_flushed_at = time.time()
    self.delay = delay

  @property
  def autoflush(self):
    # Marker used by MakeFileAutoFlush() to detect an existing wrapper.
    return self

  def write(self, out, *args, **kwargs):
    self._wrapped.write(out, *args, **kwargs)
    flush_due = False
    with self.lock:
      if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
        flush_due = True
        self.__last_flushed_at = time.time()
    # Flush outside the lock to avoid holding it during slow I/O.
    if flush_due:
      self.flush()
class Annotated(Wrapper):
  """Creates a file object clone to automatically prepends every line in worker
  threads with a NN> prefix.
  """
  def __init__(self, wrapped, include_zero=False):
    super(Annotated, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    # Per-thread-index partial-line buffers: {index: ['pending text']}.
    self.__output_buffers = {}
    # Whether output of the main (index 0) thread is buffered/prefixed too.
    self.__include_zero = include_zero

  @property
  def annotated(self):
    # Marker used by MakeFileAnnotated() to detect an existing wrapper.
    return self

  def write(self, out):
    index = getattr(threading.currentThread(), 'index', 0)
    if not index and not self.__include_zero:
      # Unindexed threads aren't buffered.
      return self._wrapped.write(out)
    self.lock.acquire()
    try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, requiring to keep a lock for the whole dictionary
      # otherwise. Using an array is faster than using a dummy object.
      if not index in self.__output_buffers:
        obj = self.__output_buffers[index] = ['']
      else:
        obj = self.__output_buffers[index]
    finally:
      self.lock.release()

    # Continue lockless.
    obj[0] += out
    while '\n' in obj[0]:
      # Emit each complete line with its 'NN>' prefix, keep the remainder.
      line, remaining = obj[0].split('\n', 1)
      if line:
        self._wrapped.write('%d>%s\n' % (index, line))
      obj[0] = remaining

  def flush(self):
    """Flush buffered output."""
    orphans = []
    self.lock.acquire()
    try:
      # Detect threads no longer existing.
      indexes = (getattr(t, 'index', None) for t in threading.enumerate())
      # NOTE(review): Python 2 semantics — filter() returns a list here.
      # Under Python 3 it would be a one-shot iterator and the repeated
      # membership tests below would be wrong.
      indexes = filter(None, indexes)
      for index in self.__output_buffers:
        if not index in indexes:
          orphans.append((index, self.__output_buffers[index][0]))
      for orphan in orphans:
        del self.__output_buffers[orphan[0]]
    finally:
      self.lock.release()

    # Don't keep the lock while writting. Will append \n when it shouldn't.
    for orphan in orphans:
      if orphan[1]:
        self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
  """Wraps fileobj in an AutoFlush, reusing any wrapper already in place.

  If fileobj is already auto-flushing, only its delay is updated.
  """
  existing = getattr(fileobj, 'autoflush', None)
  if not existing:
    return AutoFlush(fileobj, delay)
  existing.delay = delay
  return fileobj
def MakeFileAnnotated(fileobj, include_zero=False):
  """Wraps fileobj in an Annotated wrapper, reusing any wrapper in place.

  include_zero: when True, output from the main (index 0) thread is buffered
  and prefixed like worker-thread output instead of passing straight through.
  An already-annotated fileobj is returned as-is, keeping whatever
  include_zero setting it was created with.
  """
  if getattr(fileobj, 'annotated', None):
    return fileobj
  # Bug fix: include_zero used to be accepted but silently dropped here.
  return Annotated(fileobj, include_zero)
# Registry of child processes spawned via CheckCallAndFilter (see
# GClientChildren.add/remove) so stray children can be killed on interrupt.
# All access is guarded by GCLIENT_CHILDREN_LOCK.
GCLIENT_CHILDREN = []
GCLIENT_CHILDREN_LOCK = threading.Lock()
class GClientChildren(object):
  """Static registry of spawned child processes, used to reap them at exit."""

  @staticmethod
  def add(popen_obj):
    """Registers a child process for later cleanup."""
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.append(popen_obj)

  @staticmethod
  def remove(popen_obj):
    """Unregisters a child that terminated normally."""
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.remove(popen_obj)

  @staticmethod
  def _attemptToKillChildren():
    global GCLIENT_CHILDREN
    # Snapshot the still-running children under the lock, but do the actual
    # killing outside it.
    with GCLIENT_CHILDREN_LOCK:
      zombies = [child for child in GCLIENT_CHILDREN if child.poll() is None]

    for child in zombies:
      try:
        child.kill()
      except OSError:
        pass

    # Keep only the children that are still alive after the kill attempt.
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN = [
          child for child in GCLIENT_CHILDREN if child.poll() is not None]

  @staticmethod
  def _areZombies():
    with GCLIENT_CHILDREN_LOCK:
      return bool(GCLIENT_CHILDREN)

  @staticmethod
  def KillAllRemainingChildren():
    """Kills every registered child, retrying once after a short pause."""
    GClientChildren._attemptToKillChildren()

    if GClientChildren._areZombies():
      time.sleep(0.5)
      GClientChildren._attemptToKillChildren()

    with GCLIENT_CHILDREN_LOCK:
      if GCLIENT_CHILDREN:
        print >> sys.stderr, 'Could not kill the following subprocesses:'
        for zombie in GCLIENT_CHILDREN:
          print >> sys.stderr, ' ', zombie.pid
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       retry=False, **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.
    retry: If the process exits non-zero, sleep for a brief interval and try
           again, up to RETRY_MAX times.

  stderr is always redirected to stdout.

  Returns the accumulated output of the child on success; raises
  subprocess2.CalledProcessError once all attempts have failed.
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  # Accumulates the child's entire output (across retries); returned on success.
  output = cStringIO.StringIO()
  filter_fn = filter_fn or (lambda x: None)
  sleep_interval = RETRY_INITIAL_SLEEP
  run_cwd = kwargs.get('cwd', os.getcwd())
  for _ in xrange(RETRY_MAX + 1):
    kid = subprocess2.Popen(
        args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
        **kwargs)
    # Track the child so GClientChildren can kill it if gclient is interrupted.
    GClientChildren.add(kid)

    # Do a flush of stdout before we begin reading from the subprocess2's stdout
    stdout.flush()

    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    try:
      in_byte = kid.stdout.read(1)
      if in_byte:
        if call_filter_on_first_line:
          filter_fn(None)
        in_line = ''
        while in_byte:
          output.write(in_byte)
          if print_stdout:
            stdout.write(in_byte)
          # Accumulate the current line; \r or \n terminates it and triggers
          # the filter callback with the newline stripped.
          if in_byte not in ['\r', '\n']:
            in_line += in_byte
          else:
            filter_fn(in_line)
            in_line = ''
          in_byte = kid.stdout.read(1)
        # Flush the rest of buffered output. This is only an issue with
        # stdout/stderr not ending with a \n.
        if len(in_line):
          filter_fn(in_line)
      rv = kid.wait()

      # Don't put this in a 'finally,' since the child may still run if we get
      # an exception.
      GClientChildren.remove(kid)

    except KeyboardInterrupt:
      print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
      raise

    if rv == 0:
      return output.getvalue()
    if not retry:
      break
    # Exponential backoff before the next attempt.
    print ("WARNING: subprocess '%s' in %s failed; will retry after a short "
           'nap...' % (' '.join('"%s"' % x for x in args), run_cwd))
    time.sleep(sleep_interval)
    sleep_interval *= 2
  # All attempts exhausted (or retry disabled): surface the last exit code.
  raise subprocess2.CalledProcessError(
      rv, args, kwargs.get('cwd', None), None, None)
class GitFilter(object):
"""A filter_fn implementation for quieting down git output messages.
Allows a custom function to skip certain lines (predicate), and will throttle
the output of percentage completed lines to only output every X seconds.
"""
PERCENT_RE = re.compile('(.*) ([0-9]{1,3})% .*')
def __init__(self, time_throttle=0, predicate=None, out_fh=None):
"""
Args:
time_throttle (int): GitFilter will throttle 'noisy' output (such as the
XX% complete messages) to only be printed at least |time_throttle|
seconds apart.
predicate (f(line)): An optional function which is invoked for every line.
The line will be skipped if predicate(line) returns False.
out_fh: File handle to write output to.
"""
self.last_time = 0
self.time_throttle = time_throttle
self.predicate = predicate
self.out_fh = out_fh or sys.stdout
self.progress_prefix = None
def __call__(self, line):
# git uses an escape sequence to clear the line; elide it.
esc = line.find(unichr(033))
if esc > -1:
line = line[:esc]
if self.predicate and not self.predicate(line):
return
now = time.time()
match = self.PERCENT_RE.match(line)
if match:
if match.group(1) != self.progress_prefix:
self.progress_prefix = match.group(1)
elif now - self.last_time < self.time_throttle:
return
self.last_time = now
self.out_fh.write('[%s] ' % Elapsed())
print >> self.out_fh, line
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root.

  Walks up from from_dir until a directory containing |filename| is found;
  returns that directory, or None if no ancestor has the file (or the match
  cannot be verified to govern from_dir).
  """
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      # Reached the filesystem root without finding the file.
      return None
    path = split_path[0]

  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order to not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
                            "file you want to use" % (filename, path))
      return path
    # The entries file is a Python snippet defining an 'entries' dict.
    scope = {}
    try:
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    # Check whether from_dir (or one of its ancestors below |path|) is one of
    # the checked-out directories; otherwise this .gclient does not govern it.
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None

  # from_dir itself is the directory holding |filename|.
  logging.info('Found gclient root at ' + path)
  return path
def PathDifference(root, subpath):
  """Returns the difference subpath minus root.

  Returns None when subpath does not live under root. A path that merely
  shares a string prefix with root (e.g. /a/bc when root is /a/b) is not
  considered inside it.
  """
  root = os.path.realpath(root)
  subpath = os.path.realpath(subpath)
  if subpath == root:
    # Keep the historical behavior of returning '' for identical paths.
    return ''
  # Compare against the root with a trailing separator so the prefix match can
  # only succeed at a path component boundary, regardless of whether the
  # caller provided one. Previously startswith() was tested on the bare root,
  # so '/a/bc/d' wrongly matched root '/a/b'.
  root = os.path.join(root, '')
  if not subpath.startswith(root):
    return None
  return subpath[len(root):]
def FindFileUpwards(filename, path=None):
  """Searches upwards from a directory (default: current) for a file.

  Returns the nearest directory — the start directory itself or an ancestor —
  that contains filename, or None if no such directory exists.
  """
  current = os.path.realpath(path or os.getcwd())
  while True:
    if os.path.exists(os.path.join(current, filename)):
      return current
    parent = os.path.dirname(current)
    if parent == current:
      # Hit the filesystem root without a match.
      return None
    current = parent
def GetMacWinOrLinux():
  """Returns 'mac', 'win', or 'linux', matching the current platform."""
  platform_name = sys.platform
  if platform_name == 'darwin':
    return 'mac'
  if platform_name.startswith('linux'):
    return 'linux'
  if platform_name.startswith(('cygwin', 'win')):
    return 'win'
  raise Error('Unknown platform: ' + platform_name)
def GetBuildtoolsPath():
  """Returns the full path to the buildtools directory.
  This is based on the root of the checkout containing the current directory.

  Returns None when neither a gclient root nor a git checkout containing a
  'buildtools' directory can be found.
  """
  # Overriding the build tools path by environment is highly unsupported and may
  # break without warning. Do not rely on this for anything important.
  override = os.environ.get('CHROMIUM_BUILDTOOLS_PATH')
  if override is not None:
    return override

  gclient_root = FindGclientRoot(os.getcwd())
  if not gclient_root:
    # Some projects might not use .gclient. Try to see whether we're in a git
    # checkout.
    # Single-element list so the nested callback can mutate the result.
    top_dir = [os.getcwd()]
    def filter_fn(line):
      top_dir[0] = os.path.normpath(line.rstrip('\n'))
    try:
      CheckCallAndFilter(["git", "rev-parse", "--show-toplevel"],
                         print_stdout=False, filter_fn=filter_fn)
    except Exception:
      # Not a git checkout either; fall through with the cwd default.
      pass
    top_dir = top_dir[0]
    if os.path.exists(os.path.join(top_dir, 'buildtools')):
      return os.path.join(top_dir, 'buildtools')
    return None

  # Some projects' top directory is not named 'src'.
  source_dir_name = GetGClientPrimarySolutionName(gclient_root) or 'src'
  return os.path.join(gclient_root, source_dir_name, 'buildtools')
def GetBuildtoolsPlatformBinaryPath():
  """Returns the full path to the binary directory for the current platform."""
  buildtools_path = GetBuildtoolsPath()
  if not buildtools_path:
    return None

  if sys.platform == 'darwin':
    subdir = 'mac'
  elif sys.platform.startswith(('cygwin', 'win')):
    subdir = 'win'
  elif sys.platform.startswith('linux'):
    # Mac and Windows just have one directory; Linux is split by word size.
    subdir = 'linux64' if sys.maxsize > 2**32 else 'linux32'
  else:
    raise Error('Unknown platform: ' + sys.platform)
  return os.path.join(buildtools_path, subdir)
def GetExeSuffix():
  """Returns '' or '.exe' depending on how executables work on this platform."""
  is_windows = sys.platform.startswith(('cygwin', 'win'))
  return '.exe' if is_windows else ''
def GetGClientPrimarySolutionName(gclient_root_dir_path):
  """Returns the name of the primary solution in the .gclient file specified."""
  config_path = os.path.join(gclient_root_dir_path, '.gclient')
  # .gclient is a Python snippet defining a 'solutions' list.
  scope = {}
  execfile(config_path, scope)
  solutions = scope.get('solutions', [])
  return solutions[0].get('name') if solutions else None
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries.

  Returns None when no .gclient_entries file exists in path or any ancestor.
  """
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  # .gclient_entries is a Python snippet defining an 'entries' dict.
  env = {}
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']
def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  def inner(self, *args, **kwargs):
    # Acquire outside the try/finally that releases: previously an interrupted
    # acquire() fell through to a finally that released a lock which was never
    # obtained, raising and masking the KeyboardInterrupt.
    try:
      self.lock.acquire()
    except KeyboardInterrupt:
      sys.stderr.write('Was deadlocked\n')
      raise
    try:
      return method(self, *args, **kwargs)
    finally:
      self.lock.release()
  return inner
class WorkItem(object):
  """A single schedulable unit of work for ExecutionQueue."""
  # On cygwin, creating a lock throws randomly when nearing ~100 locks, so a
  # single class-level lock is deliberately shared by every instance.
  lock = threading.Lock()

  def __init__(self, name):
    # A unique string identifying this work item.
    self._name = name
    # Captures everything the item prints while it runs.
    self.outbuf = cStringIO.StringIO()
    # Timestamps recorded by the runner when execution starts/completes.
    self.start = self.finish = None

  def run(self, work_queue):
    """Does the actual work; work_queue is passed as keyword argument so it
    should be the last parameter of the function when you override it."""
    pass

  @property
  def name(self):
    return self._name
class ExecutionQueue(object):
  """Runs a set of WorkItem that have interdependencies and were WorkItem are
  added as they are processed.

  In gclient's case, Dependencies sometime needs to be run out of order due to
  From() keyword. This class manages that all the required dependencies are run
  before running each one.

  Methods of this class are thread safe.
  """
  def __init__(self, jobs, progress, ignore_requirements, verbose=False):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status
    self.progress = progress
    if self.progress:
      self.progress.update(0)

    self.ignore_requirements = ignore_requirements
    self.verbose = verbose
    # Timestamps used by flush() to decide when to print "Still working on:".
    self.last_join = None
    self.last_subproc_output = None

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      if self.jobs == 1:
        total += 1
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        self.progress._total = total
        self.progress.update(0)
      # Wake flush() so it can consider the new item.
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def out_cb(self, _):
    # Callback invoked on subprocess output; only the timestamp matters here.
    self.last_subproc_output = datetime.datetime.now()
    return True

  @staticmethod
  def format_task_output(task, comment=''):
    # Renders a task's name, optional comment, elapsed time, and its captured
    # output as a banner-delimited report for logging.
    if comment:
      comment = ' (%s)' % comment
    if task.start and task.finish:
      elapsed = ' (Elapsed: %s)' % (
          str(task.finish - task.start).partition('.')[0])
    else:
      elapsed = ''
    return """
%s%s%s
----------------------------------------
%s
----------------------------------------""" % (
        task.name, comment, elapsed, task.outbuf.getvalue().strip())

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed."""
    kwargs['work_queue'] = self
    self.last_subproc_output = self.last_join = datetime.datetime.now()
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break

          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            if (self.ignore_requirements or
                not (set(self.queued[i].requirements) - set(self.ran))):
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out the outer loop.
            break

        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
          # If we haven't printed to terminal for a while, but we have received
          # spew from a subprocess, let the user know we're still progressing.
          now = datetime.datetime.now()
          if (now - self.last_join > datetime.timedelta(seconds=60) and
              self.last_subproc_output > self.last_join):
            if self.progress:
              print >> sys.stdout, ''
              sys.stdout.flush()
            elapsed = Elapsed()
            print >> sys.stdout, '[%s] Still working on:' % elapsed
            sys.stdout.flush()
            for task in self.running:
              print >> sys.stdout, '[%s] %s' % (elapsed, task.item.name)
              sys.stdout.flush()
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
                self.jobs,
                len(self.queued),
                ', '.join(self.ran),
                len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s (not started): %s' % (
                i.name, ', '.join(i.requirements))
          for i in self.running:
            print >> sys.stderr, self.format_task_output(i.item, 'interrupted')
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      if self.progress:
        print >> sys.stdout, ''
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e, task = self.exceptions.get()
      print >> sys.stderr, self.format_task_output(task.item, 'ERROR')
      raise e[0], e[1], e[2]
    elif self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        # Still busy: keep it in the running list.
        self.running.append(t)
      else:
        t.join()
        self.last_join = datetime.datetime.now()
        sys.stdout.flush()
        if self.verbose:
          print >> sys.stdout, self.format_task_output(t.item)
        if self.progress:
          self.progress.update(1, t.item.name)
        # A task name appearing twice means dependency bookkeeping went wrong.
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    if self.jobs > 1:
      # Start the thread.
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      try:
        task_item.start = datetime.datetime.now()
        print >> task_item.outbuf, '[%s] Started.' % Elapsed(task_item.start)
        task_item.run(*args, **kwargs)
        task_item.finish = datetime.datetime.now()
        print >> task_item.outbuf, '[%s] Finished.' % Elapsed(task_item.finish)
        self.ran.append(task_item.name)
        if self.verbose:
          if self.progress:
            print >> sys.stdout, ''
          print >> sys.stdout, self.format_task_output(task_item)
        if self.progress:
          self.progress.update(1, ', '.join(t.item.name for t in self.running))
      except KeyboardInterrupt:
        print >> sys.stderr, self.format_task_output(task_item, 'interrupted')
        raise
      except Exception:
        print >> sys.stderr, self.format_task_output(task_item, 'ERROR')
        raise

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""
    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      self.index = index
      self.args = args
      self.kwargs = kwargs
      self.daemon = True

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.start = datetime.datetime.now()
        print >> self.item.outbuf, '[%s] Started.' % Elapsed(self.item.start)
        self.item.run(*self.args, **self.kwargs)
        self.item.finish = datetime.datetime.now()
        print >> self.item.outbuf, '[%s] Finished.' % Elapsed(self.item.finish)
      except KeyboardInterrupt:
        logging.info('Caught KeyboardInterrupt in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put((sys.exc_info(), self))
        raise
      except Exception:
        # Catch exception location.
        logging.info('Caught exception in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put((sys.exc_info(), self))
      finally:
        logging.info('_Worker.run(%s) done', self.item.name)
        # Always wake the queue so flush() notices this thread finished.
        work_queue.ready_cond.acquire()
        try:
          work_queue.ready_cond.notifyAll()
        finally:
          work_queue.ready_cond.release()
def GetEditor(git, git_editor=None):
  """Returns the most plausible editor to use.

  In order of preference:
  - GIT_EDITOR/SVN_EDITOR environment variable
  - core.editor git configuration variable (if supplied by git-cl)
  - VISUAL environment variable
  - EDITOR environment variable
  - vi (non-Windows) or notepad (Windows)

  In the case of git-cl, this matches git's behaviour, except that it does not
  include dumb terminal detection.

  In the case of gcl, this matches svn's behaviour, except that it does not
  accept a command-line flag or check the editor-cmd configuration variable.
  """
  if git:
    editor = os.environ.get('GIT_EDITOR') or git_editor
  else:
    editor = os.environ.get('SVN_EDITOR')
  # Generic fallbacks shared by the git and svn paths.
  for fallback in ('VISUAL', 'EDITOR'):
    if editor:
      break
    editor = os.environ.get(fallback)
  if editor:
    return editor
  # Last resort: a platform-appropriate default.
  return 'notepad' if sys.platform.startswith('win') else 'vi'
def RunEditor(content, git, git_editor=None):
  """Opens up the default editor in the system to get the CL description.

  Writes content to a temp file, opens the user's editor on it, and returns
  the edited text; returns None if no editor is available or it exits
  non-zero. The temp file is always removed.
  """
  file_handle, filename = tempfile.mkstemp(text=True, prefix='cl_description')
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()

  try:
    editor = GetEditor(git, git_editor=git_editor)
    if not editor:
      return None
    cmd = '%s %s' % (editor, filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires the usage of 'env' to be present.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    # Always clean up the temporary file, even if the editor failed.
    os.remove(filename)
def UpgradeToHttps(url):
  """Upgrades random urls to https://.

  Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number,
  Fixes invalid GAE url.
  """
  if not url:
    return url
  if not re.match(r'[a-z\-]+\://.*', url):
    # Without a scheme, urlparse() would treat the input as a relative url and
    # produce http:///foo. Default to http:// so naked urls such as
    # "localhost:8080" keep working.
    url = 'http://%s' % url
  parts = list(urlparse.urlparse(url))
  has_explicit_port = re.match(r'^.+?\:\d+$', parts[1])
  # Leave http urls with an explicit port alone; upgrading those would most
  # likely point at the wrong server.
  if parts[0] == 'http' and not has_explicit_port:
    parts[0] = 'https'
  return urlparse.urlunparse(parts)
def ParseCodereviewSettingsContent(content):
  """Parses codereview.settings content into a {key: value} dict.

  Blank lines and '#' comment lines are ignored; the CODE_REVIEW_SERVER and
  VIEW_VC urls are upgraded to https where possible.
  """
  meaningful = [l for l in content.splitlines()
                if l and not l.strip().startswith("#")]
  try:
    keyvals = dict([part.strip() for part in l.split(':', 1)]
                   for l in meaningful)
  except ValueError:
    raise Error(
        'Failed to process settings, please fix. Content:\n\n%s' % content)
  for url_key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
    if keyvals.get(url_key):
      keyvals[url_key] = UpgradeToHttps(keyvals[url_key])
  return keyvals
def NumLocalCpus():
  """Returns the number of processors.

  Python on OSX 10.6 raises a NotImplementedError exception.
  """
  try:
    import multiprocessing
    count = multiprocessing.cpu_count()
  except:  # pylint: disable=W0702
    # Mac OS 10.6 only
    # pylint: disable=E1101
    count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
  return count
def DefaultDeltaBaseCacheLimit():
  """Return a reasonable default for the git config core.deltaBaseCacheLimit.

  The primary constraint is the address space of virtual memory. The cache
  size limit is per-thread, and 32-bit systems can hit OOM errors if this
  parameter is set too high.
  """
  is_64bit = platform.architecture()[0].startswith('64')
  return '2g' if is_64bit else '512m'
def DefaultIndexPackConfig(url=''):
  """Return reasonable default values for configuring git-index-pack.

  Experiments suggest that higher values for pack.threads don't improve
  performance."""
  result = ['-c', 'core.deltaBaseCacheLimit=%s' % DefaultDeltaBaseCacheLimit()]
  # Some hosts misbehave with threaded index-pack; force a single thread.
  if url in THREADED_INDEX_PACK_BLACKLIST:
    result += ['-c', 'pack.threads=1']
  return result
| |
from __future__ import annotations
from typing import cast
import warnings
import numpy as np
from pandas._libs.lib import (
NoDefault,
no_default,
)
from pandas._libs.missing import is_matching_na
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import (
safe_sort,
take_nd,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.core.arrays.string_ import StringDtype
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
    left,
    right,
    check_dtype: bool | str = "equiv",
    check_less_precise: bool | int | NoDefault = no_default,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    **kwargs,
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.

        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    rtol : float, default 1e-5
        Relative tolerance.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance.

        .. versionadded:: 1.1.0
    """
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        # https://github.com/python/mypy/issues/7642
        # error: Argument 1 to "_get_tol_from_less_precise" has incompatible
        # type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
        rtol = atol = _get_tol_from_less_precise(
            check_less_precise  # type: ignore[arg-type]
        )

    # Dispatch on the pandas container type: each branch delegates to the
    # dedicated assertion helper with inexact comparison enabled.
    if isinstance(left, Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
        return
    if isinstance(left, Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
        return
    if isinstance(left, DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
        return

    # Other sequences and scalars.
    if check_dtype:
        both_numeric = is_number(left) and is_number(right)
        both_bool = is_bool(left) and is_bool(right)
        # Numeric classes (np.float64 vs float) and bool classes (np.bool_ vs
        # bool) are allowed to differ; anything else must match exactly.
        if not (both_numeric or both_bool):
            if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                obj = "numpy array"
            else:
                obj = "Input"
            assert_class_equal(left, right, obj=obj)
    # if we have "equiv", this becomes True
    _testing.assert_almost_equal(
        left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs
    )
def _get_tol_from_less_precise(check_less_precise: bool | int) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
    """Check that two dicts are equal, raising AssertionError otherwise."""
    # Validate operand types first, then delegate the element-wise comparison
    # to the C-implemented helper.
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: bool | str = "equiv",
check_names: bool = True,
check_less_precise: bool | int | NoDefault = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas import testing as tm
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> tm.assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index") -> None:
if not exact:
return
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal("inferred_type", left, right, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if is_categorical_dtype(left.dtype) and is_categorical_dtype(right.dtype):
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
assert_index_equal(left.categories, right.categories, exact=exact)
return
assert_attr_equal("dtype", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_nd(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
# https://github.com/python/mypy/issues/7642
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
# type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
rtol = atol = _get_tol_from_less_precise(
check_less_precise # type: ignore[arg-type]
)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = Index(safe_sort(left))
right = Index(safe_sort(right))
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
# if we have "equiv", this becomes True
exact_bool = bool(exact)
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact_bool,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: bool | str = True, obj="Input"):
    """
    Checks classes are equal.
    """
    from pandas.core.indexes.numeric import NumericIndex
    __tracebackhide__ = True
    def _describe(value):
        # Show the full Index (with its values) in error messages so the
        # mismatch is visible; anything else is summarized by class name.
        if isinstance(value, Index):
            return value
        return type(value).__name__
    if type(left) == type(right):
        return
    if exact == "equiv" and isinstance(left, NumericIndex) and isinstance(right, NumericIndex):
        # "equiv" accepts any pair of NumericIndex (sub-)classes as equal.
        return
    raise_assert_detail(
        obj, f"{obj} classes are different", _describe(left), _describe(right)
    )
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
    """
    Check attributes are equal. Both objects must have attribute.
    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    attr_l = getattr(left, attr)
    attr_r = getattr(right, attr)
    # Fast-path: same object, or both the same NA sentinel
    # (e.g. both np.nan, both NaT, both pd.NA, ...).
    if attr_l is attr_r or is_matching_na(attr_l, attr_r):
        return True
    try:
        equal = attr_l == attr_r
    except TypeError:
        # datetimetz on rhs may raise TypeError
        equal = False
    if (attr_l is pd.NA) ^ (attr_r is pd.NA):
        # exactly one side is pd.NA -> never equal
        equal = False
    elif not isinstance(equal, bool):
        # array-like comparison result: require every element to match
        equal = equal.all()
    if not equal:
        raise_assert_detail(obj, f'Attribute "{attr}" are different', attr_l, attr_r)
    return True
def assert_is_valid_plot_return_object(objs):
    """
    Assert that *objs* is a valid return value from a pandas plotting call.

    Parameters
    ----------
    objs : object
        Either an ndarray/Series of matplotlib ``Axes`` (or dicts), or a
        single matplotlib ``Artist``, tuple, or dict.

    Raises
    ------
    AssertionError
        If *objs* (or one of its elements) is not an accepted plot object.
    """
    import matplotlib.pyplot as plt
    if isinstance(objs, (Series, np.ndarray)):
        # array-like of plot objects: every element must be an Axes (or dict)
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, "
                f"type encountered {repr(type(el).__name__)}"
            )
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        # BUG FIX: message previously read "ArtistArtist" (duplicated word).
        msg = (
            "objs is neither an ndarray of Artist instances nor a single "
            "Artist instance, tuple, or dict, 'objs' is a "
            f"{repr(type(objs).__name__)}"
        )
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
    """Assert that *seq* is in sorted (non-decreasing) order."""
    values = seq.values if isinstance(seq, (Index, Series)) else seq
    # sorting does not change precisions
    assert_numpy_array_equal(values, np.sort(np.array(values)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """
    Test that Categoricals are equivalent.
    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes. If False, only the resulting
        values are compared. The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # strict: categories and the integer codes must match exactly
        assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
        assert_numpy_array_equal(
            left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
        )
    else:
        # order-insensitive: compare sorted categories plus decoded values
        try:
            cats_l = left.categories.sort_values()
            cats_r = right.categories.sort_values()
        except TypeError:
            # e.g. '<' not supported between instances of 'int' and 'str'
            cats_l, cats_r = left.categories, right.categories
        assert_index_equal(cats_l, cats_r, obj=f"{obj}.categories")
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj=f"{obj}.values",
        )
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """
    Test that two IntervalArrays are equivalent.
    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
        NOTE(review): currently unused in the body; retained for API
        compatibility.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)
    kwargs = {}
    if left._left.dtype.kind in ["m", "M"]:
        # We have a DatetimeArray or TimedeltaArray; freq is not meaningful
        # for interval endpoints, so don't compare it.
        kwargs["check_freq"] = False
    assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    # BUG FIX: the right endpoints were previously labelled "{obj}.left",
    # producing a misleading assertion message on mismatch.
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """Assert that two PeriodArrays hold identical ordinal data and freq."""
    _check_isinstance(left, right, PeriodArray)
    # compare the backing ordinal values first, then the freq metadata
    assert_numpy_array_equal(
        left._data,
        right._data,
        obj=f"{obj}._data",
    )
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
    """Assert two DatetimeArrays are equal: backing values, optional freq, tz."""
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    # tz is always compared; freq only when requested
    attrs = (["freq"] if check_freq else []) + ["tz"]
    for attr_name in attrs:
        assert_attr_equal(attr_name, left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
    """Assert two TimedeltaArrays are equal: backing values and (optionally) freq."""
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    values_label = f"{obj}._data"
    assert_numpy_array_equal(left._data, right._data, obj=values_label)
    if check_freq:
        assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
    """
    Raise an AssertionError describing how *left* and *right* differ.

    The message lists the object name, the failure description, the optional
    shared index, both values, and an optional diff summary.
    """
    __tracebackhide__ = True
    def _display(value):
        # ndarrays get pretty-printed; categorical/Pandas/String dtypes get
        # their repr so the dtype is visible; everything else passes through.
        if isinstance(value, np.ndarray):
            return pprint_thing(value)
        if is_categorical_dtype(value) or isinstance(value, (PandasDtype, StringDtype)):
            return repr(value)
        return value
    parts = [f"{obj} are different\n{message}"]
    if isinstance(index_values, np.ndarray):
        parts.append(f"[index]: {pprint_thing(index_values)}")
    parts.append(f"[left]: {_display(left)}")
    parts.append(f"[right]: {_display(right)}")
    if diff is not None:
        parts.append(f"[diff]: {diff}")
    raise AssertionError("\n".join(parts))
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
    index_values=None,
):
    """
    Check that 'np.ndarray' is equivalent.
    Parameters
    ----------
    left, right : numpy.ndarray or iterable
        The two arrays to be compared.
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both a and b are np.ndarray.
    err_msg : str, default None
        If provided, used as assertion message.
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area.
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    index_values : numpy.ndarray, default None
        optional index (shared by both left and right), used in output.
    """
    __tracebackhide__ = True
    # Class check first, so mismatched types get a rich error message;
    # then enforce that both really are ndarrays.
    assert_class_equal(left, right, obj=obj)
    _check_isinstance(left, right, np.ndarray)
    def _underlying(arr):
        # views expose the owning array via .base
        base = getattr(arr, "base", None)
        return arr if base is None else base
    base_l = _underlying(left)
    base_r = _underlying(right)
    if check_same == "same":
        if base_l is not base_r:
            raise AssertionError(f"{repr(base_l)} is not {repr(base_r)}")
    elif check_same == "copy":
        if base_l is base_r:
            raise AssertionError(f"{repr(base_l)} is {repr(base_r)}")
    def _fail(err_msg):
        if err_msg is not None:
            raise AssertionError(err_msg)
        if left.shape != right.shape:
            raise_assert_detail(
                obj, f"{obj} shapes are different", left.shape, right.shape
            )
        # count up differences element-by-element for the percentage report
        mismatches = sum(
            1
            for arr_l, arr_r in zip(left, right)
            if not array_equivalent(arr_l, arr_r, strict_nan=strict_nan)
        )
        pct = mismatches * 100.0 / left.size
        raise_assert_detail(
            obj,
            f"{obj} values are different ({np.round(pct, 5)} %)",
            left,
            right,
            index_values=index_values,
        )
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _fail(err_msg)
    if check_dtype and isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
        assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left,
    right,
    check_dtype=True,
    index_values=None,
    check_less_precise=no_default,
    check_exact=False,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
):
    """
    Check that left and right ExtensionArrays are equal.
    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare.
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    index_values : numpy.ndarray, default None
        Optional index (shared by both left and right), used in output.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_exact : bool, default False
        Whether to compare number exactly.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    Examples
    --------
    >>> from pandas import testing as tm
    >>> a = pd.Series([1, 2, 3, 4])
    >>> b, c = a.array, a.array
    >>> tm.assert_extension_array_equal(b, c)
    """
    # Legacy precision keyword: warn and translate into rtol/atol.
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    same_type = type(right) == type(left)
    if (
        same_type
        and isinstance(left, DatetimeLikeArrayMixin)
        and isinstance(right, DatetimeLikeArrayMixin)
    ):
        # Fast path for datetime-likes: compare the i8 representations and
        # avoid slow object-dtype comparisons.
        # np.asarray for case where we have a np.MaskedArray
        assert_numpy_array_equal(
            np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
        )
        return
    # NA masks must agree before the valid values are compared
    na_left = np.asarray(left.isna())
    na_right = np.asarray(right.isna())
    assert_numpy_array_equal(
        na_left, na_right, obj="ExtensionArray NA mask", index_values=index_values
    )
    valid_left = np.asarray(left[~na_left].astype(object))
    valid_right = np.asarray(right[~na_right].astype(object))
    if check_exact:
        assert_numpy_array_equal(
            valid_left, valid_right, obj="ExtensionArray", index_values=index_values
        )
        return
    _testing.assert_almost_equal(
        valid_left,
        valid_right,
        check_dtype=check_dtype,
        rtol=rtol,
        atol=atol,
        obj="ExtensionArray",
        index_values=index_values,
    )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=no_default,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_category_order=True,
    check_freq=True,
    check_flags=True,
    rtol=1.0e-5,
    atol=1.0e-8,
    obj="Series",
    *,
    check_index=True,
):
    """
    Check that left and right Series are equal.
    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_category_order : bool, default True
        Whether to compare category order of internal Categoricals.
        .. versionadded:: 1.0.2
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
        .. versionadded:: 1.1.0
    check_flags : bool, default True
        Whether to check the `flags` attribute.
        .. versionadded:: 1.2.0
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    check_index : bool, default True
        Whether to check index equivalence. If False, then compare only values.
        .. versionadded:: 1.3.0
    Examples
    --------
    >>> from pandas import testing as tm
    >>> a = pd.Series([1, 2, 3, 4])
    >>> b = pd.Series([1, 2, 3, 4])
    >>> tm.assert_series_equal(a, b)
    """
    __tracebackhide__ = True
    # Legacy precision keyword: warn and translate into rtol/atol.
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        assert_class_equal(left, right, obj=obj)
    # length comparison (cheap check before comparing contents)
    if len(left) != len(right):
        msg1 = f"{len(left)}, {left.index}"
        msg2 = f"{len(right)}, {right.index}"
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    if check_flags:
        assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
    if check_index:
        # GH #38183
        assert_index_equal(
            left.index,
            right.index,
            exact=check_index_type,
            check_names=check_names,
            check_exact=check_exact,
            check_categorical=check_categorical,
            rtol=rtol,
            atol=atol,
            obj=f"{obj}.index",
        )
    if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
        # freq is index metadata not covered by the value comparison above
        lidx = left.index
        ridx = right.index
        assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left.dtype)
            and is_categorical_dtype(right.dtype)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
    # NOTE: the order of the branches below is significant — each dtype
    # combination must be dispatched to the matching comparison routine.
    if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
        left_values = left._values
        right_values = right._values
        # Only check exact if dtype is numeric
        if isinstance(left_values, ExtensionArray) and isinstance(
            right_values, ExtensionArray
        ):
            assert_extension_array_equal(
                left_values,
                right_values,
                check_dtype=check_dtype,
                index_values=np.asarray(left.index),
            )
        else:
            assert_numpy_array_equal(
                left_values,
                right_values,
                check_dtype=check_dtype,
                obj=str(obj),
                index_values=np.asarray(left.index),
            )
    elif check_datetimelike_compat and (
        needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
    ):
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        # datetimelike may have different objects (e.g. datetime.datetime
        # vs Timestamp) but will compare equal
        if not Index(left._values).equals(Index(right._values)):
            msg = (
                f"[datetimelike_compat=True] {left._values} "
                f"is not equal to {right._values}."
            )
            raise AssertionError(msg)
    elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
        # interval values: compare endpoints/closedness via IntervalArray
        assert_interval_array_equal(left.array, right.array)
    elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
        # approximate comparison of the decoded values; the Categorical
        # internals are checked below under `check_categorical`
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
        assert_extension_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
        )
    elif is_extension_array_dtype_and_needs_i8_conversion(
        left.dtype, right.dtype
    ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
        # mixed EA / datetimelike pair (GH #37609)
        assert_extension_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
        )
    elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
        # DatetimeArray or TimedeltaArray
        assert_extension_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
        )
    else:
        # fallback: approximate comparison with rtol/atol
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
            assert_categorical_equal(
                left._values,
                right._values,
                obj=f"{obj} category",
                check_category_order=check_category_order,
            )
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=no_default,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    check_freq=True,
    check_flags=True,
    rtol=1.0e-5,
    atol=1.0e-8,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.
    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.
    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
        .. versionadded:: 1.1.0
    check_flags : bool, default True
        Whether to check the `flags` attribute.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.
    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.
    >>> from pandas.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    df1 equals itself.
    >>> assert_frame_equal(df1, df1)
    df1 differs from df2 as column 'b' is of a different type.
    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
    Attribute "dtype" are different
    [left]: int64
    [right]: float64
    Ignore differing dtypes in columns with check_dtype.
    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True
    # Legacy precision keyword: warn and translate into rtol/atol.
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison (cheap check before comparing contents)
    if left.shape != right.shape:
        raise_assert_detail(
            obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
        )
    if check_flags:
        assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        check_order=not check_like,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.index",
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        check_order=not check_like,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.columns",
    )
    if check_like:
        # align `left` to `right`'s row/column order before comparing values
        left, right = left.reindex_like(right), right
    # compare by blocks (grouped by dtype), each side must have the same set
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            # GH #38183
            # use check_index=False, because we do not want to run
            # assert_index_equal for each column,
            # as we already checked it for the whole dataframe before.
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                check_freq=check_freq,
                obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
                rtol=rtol,
                atol=atol,
                check_index=False,
            )
def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
    Parameters
    ----------
    left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
        The two items to be compared.
    **kwargs
        All keyword arguments are passed through to the underlying assert method.
    """
    __tracebackhide__ = True
    # NOTE: branch order is significant — the datetimelike array cases must be
    # checked before the generic ExtensionArray case, since DatetimeArray and
    # TimedeltaArray are ExtensionArray subclasses.
    if isinstance(left, Index):
        assert_index_equal(left, right, **kwargs)
        # freq of Datetime/TimedeltaIndex is not covered by assert_index_equal
        # (it only compares freq for PeriodIndex), so check it explicitly here.
        if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
            assert left.freq == right.freq, (left.freq, right.freq)
    elif isinstance(left, Series):
        assert_series_equal(left, right, **kwargs)
    elif isinstance(left, DataFrame):
        assert_frame_equal(left, right, **kwargs)
    elif isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
    elif isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
    elif isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
    elif isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
    elif isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
    elif isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
    elif isinstance(left, str):
        # plain string comparison takes no keyword options
        assert kwargs == {}
        assert left == right
    else:
        raise NotImplementedError(type(left))
def assert_sp_array_equal(left, right):
    """
    Check that the left and right SparseArray are equal.

    Compares the sparse values, the SparseIndex, fill_value, dtype,
    and finally the densified values.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    """
    _check_isinstance(left, right, pd.arrays.SparseArray)
    assert_numpy_array_equal(left.sp_values, right.sp_values)
    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
    left_index = left.sp_index
    right_index = right.sp_index
    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    # (removed a dead `else: pass` branch with a truncated comment)
    assert_attr_equal("fill_value", left, right)
    assert_attr_equal("dtype", left, right)
    # densified values must also match (covers fill positions)
    assert_numpy_array_equal(left.to_dense(), right.to_dense())
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*."""
    for item in iterable:
        assert item in dic, f"Did not contain item: {repr(item)}"
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    Assert that paired elements of *iter1* and *iter2* compare equal (via
    ``assert_almost_equal``, forwarding *eql_kwargs*) but are not the same
    object. Identity of items nested inside sequences is not checked.
    """
    for first, second in zip(iter1, iter2):
        assert_almost_equal(first, second, **eql_kwargs)
        identity_msg = (
            f"Expected object {repr(type(first))} and object {repr(type(second))} to be "
            "different objects, but they were the same object."
        )
        assert first is not second, identity_msg
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
    """
    Checks that we have the combination of an ExtensionArray dtype (left)
    and a dtype that should be converted to int64 (right).
    Returns
    -------
    bool
    Related to issue #37609
    """
    if not is_extension_array_dtype(left_dtype):
        return False
    return needs_i8_conversion(right_dtype)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import urlparse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from keystoneclient import service_catalog
from keystoneclient.v2_0 import client as keystone_client
from keystoneclient.v2_0 import tokens
from horizon.api import base
from horizon import exceptions
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
class Service(base.APIDictWrapper):
    """ Wrapper for a dict based on the service data from keystone. """
    _attrs = ['id', 'type', 'name']

    def __init__(self, service, *args, **kwargs):
        super(Service, self).__init__(service, *args, **kwargs)
        # Convenience attributes derived from the first endpoint entry.
        endpoint = service['endpoints'][0]
        self.url = endpoint['internalURL']
        self.host = urlparse.urlparse(self.url).hostname
        self.region = endpoint['region']
        self.disabled = None

    def __unicode__(self):
        # Only the identity service advertises which backend drives it.
        if(self.type != "identity"):
            return self.type
        return _("%(type)s (%(backend)s backend)") \
            % {"type": self.type,
               "backend": keystone_backend_name()}

    def __repr__(self):
        return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
    """Return the identity endpoint for this request, preferring the
    user's service catalog and falling back to the session/region or
    the statically configured Keystone URL."""
    has_catalog = getattr(request.user, "service_catalog", None)
    if has_catalog:
        return base.url_for(request,
                            service_type='identity',
                            endpoint_type=endpoint_type)
    fallback = getattr(settings, 'OPENSTACK_KEYSTONE_URL')
    return request.session.get('region_endpoint', fallback)
def keystoneclient(request, username=None, password=None, tenant_id=None,
                   token_id=None, endpoint=None, endpoint_type=None,
                   admin=False):
    """Returns a client connected to the Keystone backend.
    Several forms of authentication are supported:
        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication
    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.
    Lazy authentication if an ``endpoint`` parameter is provided.
    Calls requiring the admin endpoint should have ``admin=True`` passed in
    as a keyword argument.
    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    user = request.user
    if admin:
        # Admin calls must come from an admin user and always use adminURL.
        if not user.is_admin():
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = endpoint_type or getattr(settings,
                                                 'OPENSTACK_ENDPOINT_TYPE',
                                                 'internalURL')
    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystone_admin" if admin else "_keystone"
    # Reuse the cached client unless a different token was explicitly asked for.
    if hasattr(request, cache_attr) and (not token_id
            or getattr(request, cache_attr).auth_token == token_id):
        LOG.debug("Using cached client for token: %s" % user.token)
        conn = getattr(request, cache_attr)
    else:
        endpoint_lookup = _get_endpoint_url(request, endpoint_type)
        auth_url = endpoint or endpoint_lookup
        LOG.debug("Creating a new keystoneclient connection to %s." % auth_url)
        conn = keystone_client.Client(username=username or user.username,
                                      password=password,
                                      tenant_id=tenant_id or user.tenant_id,
                                      token=token_id or user.token,
                                      auth_url=auth_url,
                                      endpoint=endpoint)
        setattr(request, cache_attr, conn)
    # Fetch the correct endpoint if we've re-scoped the token.
    catalog = getattr(conn, 'service_catalog', None)
    if catalog and "serviceCatalog" in catalog.catalog.keys():
        catalog = catalog.catalog['serviceCatalog']
    endpoint = _get_endpoint_url(request, endpoint_type, catalog)
    conn.management_url = endpoint
    return conn
def tenant_name(request, tenant_id):
    """Return the display name of the tenant with the given id."""
    tenant = keystoneclient(request).tenants.get(tenant_id)
    return tenant.name
def tenant_create(request, tenant_name, description, enabled):
    """Create a tenant; requires the admin endpoint."""
    manager = keystoneclient(request, admin=True).tenants
    return manager.create(tenant_name, description, enabled)
def tenant_get(request, tenant_id, admin=False):
    """Fetch one tenant by id, optionally via the admin endpoint."""
    client = keystoneclient(request, admin=admin)
    return client.tenants.get(tenant_id)
def tenant_delete(request, tenant_id):
    """Delete a tenant; requires the admin endpoint. Returns nothing."""
    client = keystoneclient(request, admin=True)
    client.tenants.delete(tenant_id)
def tenant_list(request, admin=False):
    """List tenants visible to the current token."""
    client = keystoneclient(request, admin=admin)
    return client.tenants.list()
def tenant_update(request, tenant_id, tenant_name, description, enabled):
    """Update a tenant's name, description and enabled flag (admin)."""
    manager = keystoneclient(request, admin=True).tenants
    return manager.update(tenant_id, tenant_name, description, enabled)
def tenant_list_for_token(request, token, endpoint_type=None):
    """List the tenants accessible with the given (unscoped) token."""
    endpoint_type = endpoint_type or getattr(settings,
                                             'OPENSTACK_ENDPOINT_TYPE',
                                             'internalURL')
    client = keystoneclient(request,
                            token_id=token,
                            endpoint=_get_endpoint_url(request, endpoint_type),
                            endpoint_type=endpoint_type)
    return client.tenants.list()
def token_create(request, tenant, username, password):
    '''
    Creates a token using the username and password provided. If tenant
    is provided it will retrieve a scoped token and the service catalog for
    the given tenant. Otherwise it will return an unscoped token and without
    a service catalog.
    '''
    internal_url = _get_endpoint_url(request, 'internalURL')
    client = keystoneclient(request,
                            username=username,
                            password=password,
                            tenant_id=tenant,
                            endpoint=internal_url)
    return client.tokens.authenticate(username=username,
                                      password=password,
                                      tenant_id=tenant)
def token_create_scoped(request, tenant, token):
    '''
    Creates a scoped token using the tenant id and unscoped token; retrieves
    the service catalog for the given tenant.
    '''
    # Drop any cached unscoped client so the re-scoped client is rebuilt.
    if hasattr(request, '_keystone'):
        del request._keystone
    c = keystoneclient(request,
                       tenant_id=tenant,
                       token_id=token,
                       endpoint=_get_endpoint_url(request, 'internalURL'))
    # return_raw keeps the full JSON payload so the catalog can be parsed.
    raw_token = c.tokens.authenticate(tenant_id=tenant,
                                      token=token,
                                      return_raw=True)
    c.service_catalog = service_catalog.ServiceCatalog(raw_token)
    if request.user.is_admin():
        c.management_url = c.service_catalog.url_for(service_type='identity',
                                                     endpoint_type='adminURL')
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'internalURL')
        c.management_url = c.service_catalog.url_for(
            service_type='identity', endpoint_type=endpoint_type)
    scoped_token = tokens.Token(tokens.TokenManager, raw_token)
    return scoped_token
def user_list(request, tenant_id=None):
    """List users, optionally restricted to one tenant (admin endpoint)."""
    client = keystoneclient(request, admin=True)
    return client.users.list(tenant_id=tenant_id)
def user_create(request, user_id, email, password, tenant_id, enabled):
    """Create a user (admin endpoint).

    NOTE(review): user_id is passed in the position keystoneclient
    documents as the user *name* -- confirm this is intended.
    """
    manager = keystoneclient(request, admin=True).users
    return manager.create(user_id, password, email, tenant_id, enabled)
def user_delete(request, user_id):
    """Delete a user (admin endpoint). Returns nothing."""
    client = keystoneclient(request, admin=True)
    client.users.delete(user_id)
def user_get(request, user_id, admin=True):
    """Fetch a single user by id."""
    client = keystoneclient(request, admin=admin)
    return client.users.get(user_id)
def user_update(request, user, **data):
    """Update arbitrary attributes of a user (admin endpoint)."""
    client = keystoneclient(request, admin=True)
    return client.users.update(user, **data)
def user_update_enabled(request, user_id, enabled):
    """Enable or disable a user (admin endpoint)."""
    manager = keystoneclient(request, admin=True).users
    return manager.update_enabled(user_id, enabled)
def user_update_password(request, user_id, password, admin=True):
    """Set a new password for a user."""
    manager = keystoneclient(request, admin=admin).users
    return manager.update_password(user_id, password)
def user_update_tenant(request, user_id, tenant_id, admin=True):
    """Move a user to a different tenant."""
    manager = keystoneclient(request, admin=admin).users
    return manager.update_tenant(user_id, tenant_id)
def role_list(request):
    """ Returns a global list of available roles. """
    client = keystoneclient(request, admin=True)
    return client.roles.list()
def add_tenant_user_role(request, tenant_id, user_id, role_id):
    """ Adds a role for a user on a tenant. """
    client = keystoneclient(request, admin=True)
    return client.roles.add_user_role(user_id, role_id, tenant_id)
def remove_tenant_user(request, tenant_id, user_id):
    """ Removes all roles from a user on a tenant, removing them from it. """
    client = keystoneclient(request, admin=True)
    for role in client.roles.roles_for_user(user_id, tenant_id):
        client.roles.remove_user_role(user_id, role.id, tenant_id)
def get_default_role(request):
    """
    Gets the default role object from Keystone and saves it as a global
    since this is configured in settings and should not change from request
    to request. Supports lookup by name or id.

    Returns the cached role, or None when no default is configured or the
    role lookup failed.
    """
    global DEFAULT_ROLE
    default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
    if default and DEFAULT_ROLE is None:
        # Bug fix: 'roles' was previously unbound when the API call raised
        # (the bare 'except:' swallowed the error and the loop below hit a
        # NameError). Initialize it and narrow the exception handler.
        roles = []
        try:
            roles = keystoneclient(request, admin=True).roles.list()
        except Exception:
            exceptions.handle(request)
        for role in roles:
            if role.id == default or role.name == default:
                DEFAULT_ROLE = role
                break
    return DEFAULT_ROLE
def list_ec2_credentials(request, user_id):
    """List all EC2 credential pairs for a user."""
    client = keystoneclient(request)
    return client.ec2.list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
    """Create a new EC2 credential pair for a user on a tenant."""
    client = keystoneclient(request)
    return client.ec2.create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
    """Fetch one EC2 credential pair by its access token."""
    client = keystoneclient(request)
    return client.ec2.get(user_id, access_token)
def keystone_can_edit_user():
    """Whether the configured keystone backend supports editing users."""
    if not hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
        return False
    return settings.OPENSTACK_KEYSTONE_BACKEND['can_edit_user']
def keystone_backend_name():
    """Name of the configured keystone backend, or 'unknown'."""
    if not hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
        return 'unknown'
    return settings.OPENSTACK_KEYSTONE_BACKEND['name']
| |
from __future__ import print_function
import os
import sys
from six.moves import range
# Make the shared acq4 utility directory importable before anything else.
md = os.path.abspath(os.path.split(__file__)[0])
sys.path = [os.path.join(md, '..', '..', 'util')] + sys.path
# Source image stack and the (possibly not-yet-existing) label stack.
dataFile = "../../atlas/CochlearNucleus/images/cochlear_nucleus.ma"
labelFile = "../../atlas/CochlearNucleus/images/cochlear_nucleus_label.ma"
from acq4.util import Qt
import pyqtgraph as pg
import numpy as np
from pyqtgraph.metaarray import MetaArray
# Load the Designer-generated UI class for this tool.
Ui_Form = Qt.importTemplate('.builderTemplate')
# setGraphicsSystem only exists on older Qt builds; guard before calling.
if callable(getattr(Qt.QApplication, "setGraphicsSystem", None)):
    Qt.QApplication.setGraphicsSystem('raster')
app = Qt.QApplication([])
win = Qt.QMainWindow()
cw = Qt.QWidget()
win.setCentralWidget(cw)
ui = Ui_Form()
ui.setupUi(cw)
win.show()
win.resize(800,600)
ui.labelTree.header().setSectionResizeMode(Qt.QHeaderView.ResizeToContents)
data = MetaArray(file=dataFile, mmap=True)
## data must have axes (anterior, dorsal, right)
# First run: create an empty uint16 label volume matching the data geometry.
if not os.path.exists(labelFile):
    label = MetaArray(np.zeros(data.shape[:-1], dtype=np.uint16), info=data.infoCopy()[:3] + [{'labels': {}}])
    label.write(labelFile, mappable=True)
label = MetaArray(file=labelFile, mmap=True, writable=True)
labelCache = None  # cached RGBA rendering of the current label slice
labelInfo = {}  # label id -> {'item': tree item, 'btn': color button}
#ui.view.enableMouse()
#ui.view.setAspectLocked(True)
vb = pg.ViewBox()
ui.view.setCentralItem(vb)
vb.setAspectLocked(True)
vb.invertY(False)
dataImg = pg.ImageItem()
labelImg = pg.ImageItem() # mode=Qt.QPainter.CompositionMode_Plus)
#labelImg.setCompositionMode(Qt.QPainter.CompositionMode_Overlay)
labelImg.setZValue(10)
labelImg.setOpacity(1)
vb.addItem(dataImg)
vb.addItem(labelImg)
def connectSignals():
    """Wire UI widget signals to their handlers (called once at startup)."""
    # Changing the viewing axis forces a full re-slice.
    for r in [ui.rightRadio, ui.dorsalRadio, ui.rostralRadio]:
        r.toggled.connect(imageChanged)
    ui.zSlider.valueChanged.connect(updateImage)
    ui.radiusSpin.valueChanged.connect(updateKernel)
    ui.greyCheck.toggled.connect(updateImage)
    ui.labelSlider.valueChanged.connect(imageChanged)
    ui.labelTree.itemChanged.connect(itemChanged)
    ui.labelTree.currentItemChanged.connect(itemSelected)
    ui.overlayCheck.toggled.connect(overlayToggled)
def init():
    """Connect signals, build the draw kernel, and populate the label tree
    from the metadata stored in the label file (sorted by label id)."""
    connectSignals()
    updateKernel()
    labelData = label._info[-1]['labels']
    d = dict([(x['id'], x) for x in labelData])
    keys = list(d.keys())
    keys.sort()
    for k in keys:
        addLabel(d[k])
def keyPressEvent(ev):
    """Keyboard handler installed on the central widget.

    Left/Right step through slices (with Ctrl held, the current label
    plane is also copied onto the neighboring slice). =/- change the
    brush radius, Space toggles label visibility, G toggles greyscale.
    """
    k = ev.key()
    mod = ev.modifiers()
    if k == Qt.Qt.Key_Right:
        if mod & Qt.Qt.ControlModifier:
            copyLabel(1)
        ui.zSlider.setValue(ui.zSlider.value()+1)
    elif k == Qt.Qt.Key_Left:
        if mod & Qt.Qt.ControlModifier:
            copyLabel(-1)
        ui.zSlider.setValue(ui.zSlider.value()-1)
    elif k == Qt.Qt.Key_Equal:
        ui.radiusSpin.setValue(ui.radiusSpin.value()+1)
    elif k == Qt.Qt.Key_Minus:
        ui.radiusSpin.setValue(ui.radiusSpin.value()-1)
    elif k == Qt.Qt.Key_Space:
        # Toggle overlay; re-render lazily when turning it back on.
        if labelImg.isVisible():
            labelImg.setVisible(False)
        else:
            updateLabelImage()
            labelImg.setVisible(True)
    elif k == Qt.Qt.Key_G:
        ui.greyCheck.toggle()
    else:
        # Let unhandled keys propagate to the default handler.
        ev.ignore()
cw.keyPressEvent = keyPressEvent
# Current slice position per axis, and which axis is currently "z".
currentPos = [0,0,0]
zAxis = 0
def draw(src, dst, mask, srcSlice, dstSlice, ev):
    """Draw-mode callback invoked by the label ImageItem's draw kernel.

    Sets (or, with Shift held, clears) the bit for the currently selected
    label id inside the current z slice, then refreshes just the region
    that changed. Labels are bit-packed into the uint16 label volume.
    """
    addLabel()
    #p = debug.Profiler('draw', disabled=True)
    l = displayLabel.view(np.ndarray)[ui.zSlider.value()]
    #p.mark('1')
    mod = ev.modifiers()
    mask = mask[srcSlice]
    src = src[srcSlice].astype(l.dtype)
    if mod & Qt.Qt.ShiftModifier:
        #src = 1-src
        # Shift-draw erases: clear this label's bit where the kernel hits.
        l[dstSlice] &= ~(src * 2**ui.labelSpin.value())
        #l[dstSlice] = l[dstSlice] * (1-mask) + src * mask
        #p.mark('2')
    else:
        # Normal draw: set this label's bit where the kernel hits.
        l[dstSlice] |= src * 2**ui.labelSpin.value()
        #p.mark('3')
    updateLabelImage(dstSlice)
    #p.mark('4')
    #p.finish()
def addLabel(info=None):
    """Add a label row to the tree.

    With info=None, create a new label for the current labelSpin value
    (no-op if it already exists) and persist it; otherwise rebuild the
    tree row from previously saved metadata.
    """
    global labelInfo
    create = False
    if info is None:
        create = True
        l = ui.labelSpin.value()
        if l in labelInfo:
            return
        info = {
            'visible': True,
            'name': 'label',
            'color': pg.intColor(len(labelInfo), 16),
            'id': l
        }
    else:
        # Copy so the stored metadata dict is not mutated in place.
        info = info.copy()
        info['color'] = pg.mkColor(info['color'])
        l = info['id']
    item = Qt.QTreeWidgetItem([str(l), info['name'], ''])
    item.setFlags(item.flags() | Qt.Qt.ItemIsEditable | Qt.Qt.ItemIsUserCheckable)
    if info['visible']:
        item.setCheckState(0, Qt.Qt.Checked)
    else:
        item.setCheckState(0, Qt.Qt.Unchecked)
    btn = pg.ColorButton(color=info['color'])
    ui.labelTree.addTopLevelItem(item)
    ui.labelTree.setItemWidget(item, 2, btn)
    labelInfo[l] = {'item': item, 'btn': btn}
    btn.sigColorChanged.connect(itemChanged)
    btn.sigColorChanging.connect(imageChanged)
    if create:
        # Only persist when this call actually created a new label.
        writeMeta()
def overlayToggled(b):
    """Switch the label layer between overlay and normal composition."""
    mode = (Qt.QPainter.CompositionMode_Overlay if b
            else Qt.QPainter.CompositionMode_SourceOver)
    labelImg.setCompositionMode(mode)
    updateImage()
def itemChanged(*args):
    """Any label edit (name, color, visibility) redraws and persists."""
    imageChanged()
    writeMeta()
def writeMeta():
    """Persist per-label metadata (id, name, color, visibility) into the
    label MetaArray's info block and write it back to disk."""
    entries = []
    for label_id, widgets in labelInfo.items():
        tree_item = widgets['item']
        entries.append({
            'id': label_id,
            'name': str(tree_item.text(1)),
            'color': pg.colorStr(widgets['btn'].color()),
            'visible': tree_item.checkState(0) == Qt.Qt.Checked,
        })
    label._info[-1]['labels'] = entries
    label.writeMeta(labelFile)
def itemSelected(item):
    """Open the name column for editing when a label row is selected."""
    ui.labelTree.editItem(item, 1)
def copyLabel(n):
    """Copy the current label's bitplane from the current slice to the
    slice n steps away (n may be negative).

    Only the bit selected by ui.labelSpin is transferred; all other
    labels already present in the target slice are preserved.
    """
    i1 = ui.zSlider.value()
    i2 = i1 + n
    # Bug fix: the original tested 'i2 > shape[0]', which let
    # i2 == shape[0] through and raised IndexError on the last slice.
    if i2 < 0 or i2 >= displayLabel.shape[0]:
        return
    mask = np.uint16(2**ui.labelSpin.value())
    displayLabel[i2] = (displayLabel[i1] & mask) | (displayLabel[i2] & ~mask)
def updateImage():
    """Display the slice selected by the z slider (averaged to greyscale
    when requested) and refresh the label overlay if it is visible."""
    currentPos[zAxis] = ui.zSlider.value()
    if ui.greyCheck.isChecked():
        # Average the color channels for a greyscale view.
        img = displayData.view(np.ndarray)[ui.zSlider.value()].mean(axis=2)
    else:
        img = displayData.view(np.ndarray)[ui.zSlider.value()]
    dataImg.setImage(img, levels=None)
    #labelImg.updateImage(displayLabel.view(np.ndarray)[ui.zSlider.value()], copy=False, white=10, black=0)
    if labelImg.isVisible():
        updateLabelImage()
def renderLabels(z, sl=None, overlay=False):
    """Render slice z of the bit-packed label volume to an RGBA ubyte image.

    sl optionally restricts rendering to a sub-region (a pair of slices).
    Each checked label contributes its color weighted by the opacity
    slider; overlay=True biases the result for overlay composition.
    """
    #p = debug.Profiler('updateLabelImage', disabled=True)
    if sl is None:
        sl = (slice(None), slice(None))
    l = displayLabel.view(np.ndarray)[z]
    #p.mark('1')
    lsl = l[sl]
    img = np.empty(lsl.shape+(4,), dtype=np.uint16)
    #img.fill(128)
    img.fill(0)
    val = ui.labelSlider.value()/128.
    for k, v in labelInfo.items():
        if not v['item'].checkState(0) == Qt.Qt.Checked:
            continue
        c = pg.colorTuple(v['btn'].color())
        # Bit k of each pixel marks label id k.
        mask = (lsl&(2**k) > 0)
        alpha = c[3]/255. * val
        # NOTE(review): in-place float multiply into a uint16 array is
        # rejected by newer NumPy casting rules -- confirm the targeted
        # NumPy version supports this.
        img[mask] *= 1.0 - alpha
        img[...,0] += mask * int(c[0] * alpha)
        img[...,1] += mask * int(c[1] * alpha)
        img[...,2] += mask * int(c[2] * alpha)
        #img[...,0] += mask * int(c[0] * val)
        #img[...,1] += mask * int(c[1] * val)
        #img[...,2] += mask * int(c[2] * val)
        img[...,3] += mask * (alpha * 255)
    if overlay:
        # Bias toward mid-grey so overlay composition has a neutral base.
        img += 128
    img = img.clip(0,255).astype(np.ubyte)
    return img
def renderStack(overlay=True):
    """
    Export label data as a 3D, RGB image
    if overlay is True, multiply in the original data image
    """
    stack = np.zeros(displayLabel.shape + (4,), dtype=np.ubyte)
    with pg.ProgressDialog("Rendering label stack...", maximum=displayLabel.shape[0]) as dlg:
        for z in range(displayLabel.shape[0]):
            stack[z] = renderLabels(z)
            if overlay: ## multiply colors, not alpha.
                # NOTE(review): in-place float multiply into a ubyte array
                # trips NumPy's same-kind casting rule on newer versions.
                stack[z][..., :3] *= displayData[z].mean(axis=2)[..., np.newaxis].astype(float)/256.
            print(z)
            dlg += 1
            if dlg.wasCanceled():
                raise Exception("Stack render canceled.")
    return stack
def renderVolume(stack, alpha=0.3, loss=0.01):
    """Collapse an RGBA stack to one RGB image by alpha-compositing the
    slices in order, attenuating accumulated color by 'loss' per slice."""
    im = np.zeros(stack.shape[1:3]+(3,), dtype=float)
    for z in range(stack.shape[0]):
        sz = stack[z].astype(float) # -128
        # Only composite pixels that carry any color/alpha at all.
        mask = sz.max(axis=2) > 0
        szm = sz[mask]
        alphaChan = szm[...,3:4]*alpha/256.
        im *= (1.0-loss)
        im[mask] *= 1.0-alphaChan
        im[mask] += szm[...,:3] * alphaChan
        #im[mask] *= (1.0-alpha)
        #im[mask] += sz[mask] * alpha
        print(z)
    return im
def updateLabelImage(sl=None):
    """Refresh the label overlay, re-rendering only region sl when a
    full-frame rendering is already cached."""
    global labelCache
    if labelCache is None: ## if we haven't cached a full frame, then the full frame must be rendered.
        sl = (slice(None), slice(None))
    img = renderLabels(ui.zSlider.value(), sl, overlay=ui.overlayCheck.isChecked())
    if labelCache is None:
        labelCache = img
        labelImg.setImage(labelCache, levels=None)
    else:
        # Patch just the redrawn region into the cached frame.
        labelCache[sl] = img
        labelImg.updateImage()
def imageChanged():
    """Re-slice the data/label volumes along the axis selected by the
    radio buttons, reset the z slider range, and redraw."""
    global zAxis, displayData, displayLabel, labelCache
    # Any axis/appearance change invalidates the cached label frame.
    labelCache = None
    if ui.rightRadio.isChecked():
        axes = ('right', 'anterior', 'dorsal')
        zAxis = 0
    elif ui.dorsalRadio.isChecked():
        axes = ('dorsal', 'right', 'anterior')
        zAxis = 1
    else:
        axes = ('anterior', 'right', 'dorsal')
        zAxis = 2
    displayData = data.transpose(axes)
    displayLabel = label.transpose(axes).view(np.ndarray)
    ui.zSlider.setMaximum(displayData.shape[0]-1)
    ui.zSlider.setValue(currentPos[zAxis])
    updateImage()
    #vb.setRange(dataImg.boundingRect())
    vb.autoRange()
def updateKernel():
    """Rebuild the circular draw kernel from the radius spinbox and
    install it on the label image item."""
    global drawKernel
    rad = ui.radiusSpin.value()+1
    size = (rad*2) - 1
    xs = np.array([range(size)])
    ys = xs.transpose()
    dist = np.sqrt((xs-rad+1)**2 + (ys-rad+1)**2)
    drawKernel = (dist < rad-1).astype(np.ubyte)
    labelImg.setDrawKernel(drawKernel, mask=drawKernel,
                           center=(rad-1, rad-1), mode=draw)
# Populate the UI from saved metadata, then render the initial view.
init()
imageChanged()
| |
import requests
import logging
from concurrent.futures import ThreadPoolExecutor
from pybitx import __version__
import pandas as pd
import json
# Module-level logger for the BitX client.
log = logging.getLogger(__name__)
# --------------------------- constants -----------------------
class BitXAPIError(ValueError):
    """Raised when a BitX API call returns an error or non-200 status."""

    def __init__(self, response):
        """:param response: the failing requests.Response-like object."""
        self.url = response.url
        self.code = response.status_code
        self.message = response.text
        # Fix: initialize ValueError with the formatted message so that
        # repr()/.args carry useful information (previously args was empty).
        super(BitXAPIError, self).__init__(str(self))

    def __str__(self):
        return "BitX request %s failed with %d: %s" % (self.url, self.code, self.message)
class BitX:
    """Client for the BitX (Luno) exchange REST API.

    Wraps authenticated and unauthenticated calls, returning parsed JSON
    dicts (or pandas DataFrames via the *_frame helpers).
    """

    def __init__(self, key, secret, options=None):
        """
        :param key: API key id
        :param secret: API key secret
        :param options: optional dict overriding 'hostname', 'port',
            'pair', 'ca' and 'timeout'
        """
        # Fix: avoid a mutable default argument; None means "no options".
        if options is None:
            options = {}
        self.options = options
        self.auth = (key, secret)
        if 'hostname' in options:
            self.hostname = options['hostname']
        else:
            self.hostname = 'api.mybitx.com'
        self.port = options['port'] if 'port' in options else 443
        self.pair = options['pair'] if 'pair' in options else 'XBTZAR'
        self.ca = options['ca'] if 'ca' in options else None
        self.timeout = options['timeout'] if 'timeout' in options else 30
        # Use a Requests session so that we can keep headers and connections
        # across API requests
        self._requests_session = requests.Session()
        self._requests_session.headers.update({
            'Accept': 'application/json',
            'Accept-Charset': 'utf-8',
            'User-Agent': 'py-bitx v' + __version__
        })
        self._executor = ThreadPoolExecutor(max_workers=5)

    def close(self):
        """Shut down the background thread pool, blocking until done."""
        log.info('Asking MultiThreadPool to shutdown')
        self._executor.shutdown(wait=True)
        log.info('MultiThreadPool has shutdown')

    def construct_url(self, call):
        """Build the full https URL for an API call name."""
        base = self.hostname
        # Only include the port explicitly when it is non-standard.
        if self.port != 443:
            base += ':%d' % (self.port,)
        return "https://%s/api/1/%s" % (base, call)

    def api_request(self, call, params, kind='auth', http_call='get'):
        """
        General API request. Generally, use the convenience functions below
        :param kind: the type of request to make. 'auth' makes an authenticated call; 'basic' is unauthenticated
        :param call: the API call to make
        :param params: a dict of query parameters
        :return: a json response, a BitXAPIError is thrown if the api returns with an error
        """
        url = self.construct_url(call)
        auth = self.auth if kind == 'auth' else None
        if http_call == 'get':
            response = self._requests_session.get(
                url, params=params, auth=auth, timeout=self.timeout)
        elif http_call == 'post':
            response = self._requests_session.post(
                url, data=params, auth=auth, timeout=self.timeout)
        else:
            raise ValueError('Invalid http_call parameter')
        try:
            result = response.json()
        except ValueError:
            result = {'error': 'No JSON content returned'}
        if response.status_code != 200 or 'error' in result:
            raise BitXAPIError(response)
        else:
            return result

    def get_ticker(self, kind='auth'):
        """Return the ticker for the configured pair."""
        params = {'pair': self.pair}
        return self.api_request('ticker', params, kind=kind)

    def get_all_tickers(self, kind='auth'):
        """Return tickers for all pairs."""
        return self.api_request('tickers', None, kind=kind)

    def get_order_book(self, limit=None, kind='auth'):
        """Return the order book, optionally truncated to 'limit' levels per side."""
        params = {'pair': self.pair}
        orders = self.api_request('orderbook', params, kind=kind)
        if limit is not None:
            orders['bids'] = orders['bids'][:limit]
            orders['asks'] = orders['asks'][:limit]
        return orders

    def get_order_book_frame(self, limit=None, kind='auth'):
        """Return the order book as a DataFrame with (side, field) columns."""
        q = self.get_order_book(limit, kind)
        asks = pd.DataFrame(q['asks'])
        bids = pd.DataFrame(q['bids'])
        index = pd.MultiIndex.from_product([('asks', 'bids'), ('price', 'volume')])
        df = pd.DataFrame(pd.concat([asks, bids], axis=1).values, columns=index)
        return df

    def get_trades(self, limit=None, kind='auth'):
        """Return recent trades, optionally truncated to 'limit' entries."""
        params = {'pair': self.pair}
        trades = self.api_request('trades', params, kind=kind)
        if limit is not None:
            trades['trades'] = trades['trades'][:limit]
        return trades

    def get_trades_frame(self, limit=None, kind='auth'):
        """Return recent trades as a DataFrame indexed by trade time."""
        trades = self.get_trades(limit, kind)
        df = pd.DataFrame(trades['trades'])
        # API timestamps are in milliseconds.
        df.index = pd.to_datetime(df.timestamp * 1e-3, unit='s')
        df.drop('timestamp', axis=1, inplace=True)
        return df

    def get_orders(self, state=None, kind='auth'):
        """
        Returns a list of the most recently placed orders. You can specify an optional state='PENDING' parameter to
        restrict the results to only open orders. You can also specify the market by using the optional pair parameter.
        The list is truncated after 100 items.
        :param kind: typically 'auth' if you want this to return anything useful
        :param state: String optional 'COMPLETE', 'PENDING', or None (default)
        :return:
        """
        params = {'pair': self.pair}
        if state is not None:
            params['state'] = state
        return self.api_request('listorders', params, kind=kind)

    def get_order(self, order_id):
        """
        Get an order by its ID
        :param order_id: string The order ID
        :return: dict order details or BitXAPIError raised
        """
        return self.api_request('orders/%s' % (order_id,), None)

    def get_orders_frame(self, state=None, kind='auth'):
        """Return recent orders as a DataFrame indexed by creation time."""
        q = self.get_orders(state, kind)
        tj = json.dumps(q['orders'])
        df = pd.read_json(tj, convert_dates=['creation_timestamp', 'expiration_timestamp'])
        df.index = df.creation_timestamp
        return df

    def create_limit_order(self, order_type, volume, price):
        """
        Create a new limit order
        :param order_type: 'buy' or 'sell'
        :param volume: the volume, in BTC
        :param price: the ZAR price per bitcoin
        :return: the order id
        """
        data = {
            'pair': self.pair,
            'type': 'BID' if order_type == 'buy' else 'ASK',
            'volume': str(volume),
            'price': str(price)
        }
        result = self.api_request('postorder', params=data, http_call='post')
        return result

    def stop_order(self, order_id):
        """
        Stop (cancel) a pending order
        :param order_id: The order ID
        :return: a success flag
        """
        data = {
            'order_id': order_id,
        }
        return self.api_request('stoporder', params=data, http_call='post')

    def stop_all_orders(self):
        """
        Stops all pending orders, both sell and buy
        :return: dict of Boolean -- whether request succeeded or not for each order_id that was pending
        """
        pending = self.get_orders('PENDING')['orders']
        ids = [order['order_id'] for order in pending]
        result = {}
        for order_id in ids:
            status = self.stop_order(order_id)
            result[order_id] = status['success']
        return result

    def get_funding_address(self, asset):
        """
        Returns the default receive address associated with your account and the amount received via the address. You
        can specify an optional address parameter to return information for a non-default receive address. In the
        response, total_received is the total confirmed Bitcoin amount received excluding unconfirmed transactions.
        total_unconfirmed is the total sum of unconfirmed receive transactions.
        :param asset: For now, only XBT is valid
        :return: dict
        """
        return self.api_request('funding_address', {'asset': asset})

    def get_withdrawals_status(self, wid=None):
        """
        :param wid: String. Optional withdrawal id. None queries for all ids
        :return:
        """
        call = 'withdrawals'
        if wid is not None:
            call += '/%s' % (wid,)
        return self.api_request(call, None)

    def get_balance(self):
        """Return the balances of all account assets."""
        return self.api_request('balance', None)

    def get_transactions(self, account_id, min_row=None, max_row=None):
        """Return transactions for an account, optionally windowed by row."""
        params = {}
        if min_row is not None:
            params['min_row'] = min_row
        if max_row is not None:
            params['max_row'] = max_row
        return self.api_request('accounts/%s/transactions' % (account_id,), params)

    def get_transactions_frame(self, account_id, min_row=None, max_row=None):
        """Return account transactions as a DataFrame indexed by timestamp."""
        tx = self.get_transactions(account_id, min_row, max_row)['transactions']
        df = pd.DataFrame(tx)
        df.index = pd.to_datetime(df.timestamp, unit='ms')
        df.drop('timestamp', axis=1, inplace=True)
        return df

    def get_pending_transactions(self, account_id):
        """Return unconfirmed transactions for an account."""
        return self.api_request('accounts/%s/pending' % (account_id,), None)
| |
"""Manifest validation."""
from __future__ import annotations
from pathlib import Path
from urllib.parse import urlparse
from awesomeversion import (
AwesomeVersion,
AwesomeVersionException,
AwesomeVersionStrategy,
)
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.helpers import config_validation as cv
from .model import Config, Integration
# Required scheme/host/path for integration documentation links.
DOCUMENTATION_URL_SCHEMA = "https"
DOCUMENTATION_URL_HOST = "www.home-assistant.io"
DOCUMENTATION_URL_PATH_PREFIX = "/integrations/"
DOCUMENTATION_URL_EXCEPTIONS = {"https://www.home-assistant.io/hassio"}
# Accepted values for the manifest's quality_scale and iot_class keys.
SUPPORTED_QUALITY_SCALES = ["gold", "internal", "platinum", "silver"]
SUPPORTED_IOT_CLASSES = [
    "assumed_state",
    "calculated",
    "cloud_polling",
    "cloud_push",
    "local_polling",
    "local_push",
]
# List of integrations that are supposed to have no IoT class
NO_IOT_CLASS = [
    "air_quality",
    "alarm_control_panel",
    "api",
    "auth",
    "automation",
    "binary_sensor",
    "blueprint",
    "button",
    "calendar",
    "camera",
    "climate",
    "color_extractor",
    "config",
    "configurator",
    "counter",
    "cover",
    "default_config",
    "device_automation",
    "device_tracker",
    "discovery",
    "downloader",
    "fan",
    "ffmpeg",
    "frontend",
    "geo_location",
    "history",
    "homeassistant",
    "humidifier",
    "image_processing",
    "image",
    "input_boolean",
    "input_button",
    "input_datetime",
    "input_number",
    "input_select",
    "input_text",
    "intent_script",
    "intent",
    "light",
    "lock",
    "logbook",
    "logger",
    "lovelace",
    "mailbox",
    "map",
    "media_player",
    "media_source",
    "my",
    "notify",
    "number",
    "onboarding",
    "panel_custom",
    "panel_iframe",
    "plant",
    "profiler",
    "proxy",
    "python_script",
    "remote",
    "safe_mode",
    "scene",
    "script",
    "search",
    "select",
    "sensor",
    "siren",
    "stt",
    "switch",
    "system_health",
    "system_log",
    "tag",
    "timer",
    "trace",
    "tts",
    "vacuum",
    "water_heater",
    "weather",
    "webhook",
    "websocket_api",
    "zone",
]
def documentation_url(value: str) -> str:
    """Validate that a documentation url has the correct path and domain."""
    if value in DOCUMENTATION_URL_EXCEPTIONS:
        return value
    parsed = urlparse(value)
    if parsed.scheme != DOCUMENTATION_URL_SCHEMA:
        raise vol.Invalid("Documentation url is not prefixed with https")
    is_ha_host = parsed.netloc == DOCUMENTATION_URL_HOST
    if is_ha_host and not parsed.path.startswith(DOCUMENTATION_URL_PATH_PREFIX):
        raise vol.Invalid(
            "Documentation url does not begin with www.home-assistant.io/integrations"
        )
    return value
def verify_lowercase(value: str):
    """Verify a value is lowercase."""
    if value != value.lower():
        raise vol.Invalid("Value needs to be lowercase")
    return value
def verify_uppercase(value: str):
    """Verify a value is uppercase."""
    if value != value.upper():
        raise vol.Invalid("Value needs to be uppercase")
    return value
def verify_version(value: str):
    """Verify the version string parses under one of the accepted strategies."""
    try:
        AwesomeVersion(
            value,
            [
                AwesomeVersionStrategy.CALVER,
                AwesomeVersionStrategy.SEMVER,
                AwesomeVersionStrategy.SIMPLEVER,
                AwesomeVersionStrategy.BUILDVER,
                AwesomeVersionStrategy.PEP440,
            ],
        )
    except AwesomeVersionException as err:
        # Chain the original parse failure so debugging shows the real cause
        # (previously raised without 'from', losing the explicit link).
        raise vol.Invalid(f"'{value}' is not a valid version.") from err
    return value
def verify_wildcard(value: str):
    """Verify the matcher contains a wildcard."""
    if value.count("*") == 0:
        raise vol.Invalid(f"'{value}' needs to contain a wildcard matcher")
    return value
# Voluptuous schema for core integration manifest.json files.
MANIFEST_SCHEMA = vol.Schema(
    {
        vol.Required("domain"): str,
        vol.Required("name"): str,
        vol.Optional("config_flow"): bool,
        vol.Optional("mqtt"): [str],
        vol.Optional("zeroconf"): [
            vol.Any(
                str,
                vol.All(
                    # Matcher keys that moved under "properties".
                    cv.deprecated("macaddress"),
                    cv.deprecated("model"),
                    cv.deprecated("manufacturer"),
                    vol.Schema(
                        {
                            vol.Required("type"): str,
                            vol.Optional("macaddress"): vol.All(
                                str, verify_uppercase, verify_wildcard
                            ),
                            vol.Optional("manufacturer"): vol.All(
                                str, verify_lowercase
                            ),
                            vol.Optional("model"): vol.All(str, verify_lowercase),
                            vol.Optional("name"): vol.All(str, verify_lowercase),
                            vol.Optional("properties"): vol.Schema(
                                {str: verify_lowercase}
                            ),
                        }
                    ),
                ),
            )
        ],
        vol.Optional("ssdp"): vol.Schema(
            vol.All([vol.All(vol.Schema({}, extra=vol.ALLOW_EXTRA), vol.Length(min=1))])
        ),
        vol.Optional("homekit"): vol.Schema({vol.Optional("models"): [str]}),
        vol.Optional("dhcp"): [
            vol.Schema(
                {
                    vol.Optional("macaddress"): vol.All(
                        str, verify_uppercase, verify_wildcard
                    ),
                    vol.Optional("hostname"): vol.All(str, verify_lowercase),
                }
            )
        ],
        vol.Optional("usb"): [
            vol.Schema(
                {
                    vol.Optional("vid"): vol.All(str, verify_uppercase),
                    vol.Optional("pid"): vol.All(str, verify_uppercase),
                    vol.Optional("serial_number"): vol.All(str, verify_lowercase),
                    vol.Optional("manufacturer"): vol.All(str, verify_lowercase),
                    vol.Optional("description"): vol.All(str, verify_lowercase),
                    vol.Optional("known_devices"): [str],
                }
            )
        ],
        vol.Required("documentation"): vol.All(
            vol.Url(), documentation_url  # pylint: disable=no-value-for-parameter
        ),
        vol.Optional(
            "issue_tracker"
        ): vol.Url(),  # pylint: disable=no-value-for-parameter
        vol.Optional("quality_scale"): vol.In(SUPPORTED_QUALITY_SCALES),
        vol.Optional("requirements"): [str],
        vol.Optional("dependencies"): [str],
        vol.Optional("after_dependencies"): [str],
        vol.Required("codeowners"): [str],
        vol.Optional("disabled"): str,
        vol.Optional("iot_class"): vol.In(SUPPORTED_IOT_CLASSES),
    }
)
# Custom integrations additionally declare a version string.
CUSTOM_INTEGRATION_MANIFEST_SCHEMA = MANIFEST_SCHEMA.extend(
    {
        vol.Optional("version"): vol.All(str, verify_version),
    }
)
def validate_version(integration: Integration):
    """
    Validate the version of the integration.
    Will be removed when the version key is no longer optional for custom integrations.
    """
    if integration.manifest.get("version"):
        return
    integration.add_error("manifest", "No 'version' key in the manifest file.")
def validate_manifest(integration: Integration, core_components_dir: Path) -> None:
    """Validate manifest."""
    if not integration.manifest:
        return
    try:
        # Core and custom integrations are held to slightly different schemas.
        if integration.core:
            MANIFEST_SCHEMA(integration.manifest)
        else:
            CUSTOM_INTEGRATION_MANIFEST_SCHEMA(integration.manifest)
    except vol.Invalid as err:
        integration.add_error(
            "manifest", f"Invalid manifest: {humanize_error(integration.manifest, err)}"
        )
    # NOTE(review): execution continues after a schema failure; the checks
    # below assume the 'domain' key is present -- confirm manifests are
    # pre-filtered upstream.
    if integration.manifest["domain"] != integration.path.name:
        integration.add_error("manifest", "Domain does not match dir name")
    if (
        not integration.core
        and (core_components_dir / integration.manifest["domain"]).exists()
    ):
        integration.add_warning(
            "manifest", "Domain collides with built-in core integration"
        )
    if (
        integration.manifest["domain"] in NO_IOT_CLASS
        and "iot_class" in integration.manifest
    ):
        integration.add_error("manifest", "Domain should not have an IoT Class")
    if (
        integration.manifest["domain"] not in NO_IOT_CLASS
        and "iot_class" not in integration.manifest
    ):
        integration.add_error("manifest", "Domain is missing an IoT Class")
    if not integration.core:
        # Custom integrations must carry a version key.
        validate_version(integration)
def validate(integrations: dict[str, Integration], config: Config) -> None:
    """Handle all integrations manifests."""
    components_dir = config.root / "homeassistant/components"
    for item in integrations.values():
        validate_manifest(item, components_dir)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.