import logging
from decimal import Decimal
from typing import Optional, List, Tuple
from waitlist.storage.database import Waitlist, ShipCheckCollection, ShipCheck,\
InvType, InvGroup, MarketGroup, WaitlistGroup
from waitlist.base import db
from waitlist.storage import modules
from waitlist.utility.constants import check_types

logger = logging.getLogger(__name__)
def add_default_sorting(collection: ShipCheckCollection, logi_wl, dps_wl, sniper_wl):
    # The old WTM sorting worked by first sorting out T2 logi hulls.
check = ShipCheck(
checkName = 'SortOutLogiHulls',
checkTargetID = logi_wl.id,
checkType = check_types.SHIP_CHECK_TYPEID,
order = 0,
modifier = Decimal('1.00'),
checkTag = 'logi'
)
collection.checks.append(check)
    for k in modules.logi_ships:
        inv_type: InvType = db.session.query(InvType).get(k)
        if inv_type is None:
            logger.error('No InvType found for logi ship typeID %s', k)
            continue
        check.ids.append(inv_type)
# check dps weapons
check = ShipCheck(
checkName = 'SortToDpsByWeapon',
checkTargetID=dps_wl.id,
checkType = check_types.MODULE_CHECK_TYPEID,
order = 1,
modifier = Decimal('1.00'),
checkTag = 'dps'
)
collection.checks.append(check)
    for k in modules.dps_weapons:
        inv_type = db.session.query(InvType).get(k)
        if inv_type is None:
            logger.error('No InvType found for dps weapon typeID %s', k)
            continue
        check.ids.append(inv_type)
# check sniper weapons
check = ShipCheck(
checkName = 'SortToSniperByWeapon',
checkTargetID = sniper_wl.id,
checkType = check_types.MODULE_CHECK_TYPEID,
order = 1,
modifier = Decimal('1.00'),
checkTag = 'sniper'
)
collection.checks.append(check)
    for k in modules.sniper_weapons:
        inv_type = db.session.query(InvType).get(k)
        if inv_type is None:
            logger.error('No InvType found for sniper weapon typeID %s', k)
            continue
        check.ids.append(inv_type)
# check sniper by market group
check = ShipCheck(
checkName = 'SortToSniperByWeaponMarketGroup',
checkTargetID = sniper_wl.id,
checkType = check_types.MODULE_CHECK_MARKETGROUP,
order = 2,
modifier = Decimal('1.00'),
checkTag = 'sniper'
)
collection.checks.append(check)
    for k, v in modules.weapongroups['sniper'].items():
        grp = db.session.query(MarketGroup).get(v)
        if grp is None:
            logger.error('No MarketGroup found for sniper weapon group %s', v)
            continue
        check.ids.append(grp)
# check dps by market group
check = ShipCheck(
checkName = 'SortToDpsByWeaponMarketGroup',
checkTargetID = dps_wl.id,
checkType = check_types.MODULE_CHECK_MARKETGROUP,
order = 2,
modifier = Decimal('1.00'),
checkTag = 'dps'
)
collection.checks.append(check)
    for k, v in modules.weapongroups['dps'].items():
        if v == 2432:  # skip Entropic Disintegrators, they get their own check below
            continue
        grp = db.session.query(MarketGroup).get(v)
        if grp is None:
            logger.error('No MarketGroup found for dps weapon group %s', v)
            continue
        check.ids.append(grp)
    # special rule for Entropic Disintegrators because they need a higher modifier
    check = ShipCheck(
        checkName = 'SortToSniperEntropicDisintegrators',
checkTargetID = sniper_wl.id,
checkType = check_types.MODULE_CHECK_MARKETGROUP,
order = 2,
modifier = Decimal('4.00'),
checkTag = 'sniper'
)
collection.checks.append(check)
check.ids.append(db.session.query(MarketGroup).get(2432))
# check dps by ship_type
check = ShipCheck(
checkName = 'SortToDpsByShipType',
checkTargetID = dps_wl.id,
checkType = check_types.SHIP_CHECK_TYPEID,
order = 3,
modifier = Decimal('1.00'),
checkTag = 'dps'
)
collection.checks.append(check)
    for k in modules.dps_ships:
        inv_type = db.session.query(InvType).get(k)
        if inv_type is None:
            logger.error('No InvType found for dps ship typeID %s', k)
            continue
        check.ids.append(inv_type)
# sniper ships by typeid
check = ShipCheck(
checkName = 'SortToSniperByShipType',
checkTargetID = sniper_wl.id,
checkType = check_types.SHIP_CHECK_TYPEID,
order = 3,
modifier = Decimal('1.00'),
checkTag = 'sniper'
)
collection.checks.append(check)
    for k in modules.sniper_ships:
        inv_type = db.session.query(InvType).get(k)
        if inv_type is None:
            logger.error('No InvType found for sniper ship typeID %s', k)
            continue
        check.ids.append(inv_type)
    # dps ships by inv group
check = ShipCheck(
checkName = 'SortToDpsByInvGroup',
checkTargetID = dps_wl.id,
checkType = check_types.SHIP_CHECK_INVGROUP,
order = 4,
modifier = Decimal('1.00'),
checkTag = 'dps'
)
collection.checks.append(check)
    for k in modules.dps_groups:
        grp = db.session.query(InvGroup).get(k)
        if grp is None:
            logger.error('No InvGroup found for dps group %s', k)
            continue
        check.ids.append(grp)
    # logi ships by inv group
check = ShipCheck(
checkName = 'SortToLogiByInvGroup',
        checkTargetID = logi_wl.id,
checkType = check_types.SHIP_CHECK_INVGROUP,
order = 4,
modifier = Decimal('1.00'),
checkTag = 'logi'
)
collection.checks.append(check)
    for k in modules.logi_groups:
        grp = db.session.query(InvGroup).get(k)
        if grp is None:
            logger.error('No InvGroup found for logi group %s', k)
            continue
        check.ids.append(grp)
if __name__ == '__main__':
    # Create one check collection per waitlist group; all three share the
    # default sorting rules defined above.
    for group_name, collection_name in (('default', 'HQAssignments'),
                                        ('assault', 'AssaultAssignments'),
                                        ('vanguard', 'VanguardAssignments')):
        waitlist_group = db.session.query(WaitlistGroup).filter(
            WaitlistGroup.groupName == group_name).one()
        logi_wl = dps_wl = sniper_wl = None
        for wl in waitlist_group.waitlists:
            if wl.waitlistType == 'dps':
                dps_wl = wl
            elif wl.waitlistType == 'logi':
                logi_wl = wl
            elif wl.waitlistType == 'sniper':
                sniper_wl = wl
        collection = ShipCheckCollection(
            checkCollectionName = collection_name,
            waitlistGroupID = waitlist_group.groupID,
            defaultTargetID = dps_wl.id,
            defaultTag = 'other'
        )
        db.session.add(collection)
        add_default_sorting(collection, logi_wl, dps_wl, sniper_wl)
        db.session.commit()
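    # A quick sanity check (a sketch, using only the SQLAlchemy models shown
    # above): list each freshly created collection and how many checks it carries.
    for coll in db.session.query(ShipCheckCollection).all():
        print(coll.checkCollectionName, len(coll.checks))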
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
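# Helpers that unpack map()/starmap() task batches inside the worker processes.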
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
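    """Run by each pool process: repeatedly pull a task from `inqueue`,
    execute it, and put a (success, result) pair on `outqueue`, until a
    sentinel arrives or `maxtasks` completed tasks have been processed."""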
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
Process = Process
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None):
self._setup_queues()
self._taskqueue = queue.Queue()
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, )
)
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15
)
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
return cleaned
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
"""
if self._join_exited_workers():
self._repopulate_pool()
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwds)`.
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
return self._map_async(func, iterable, mapstar, chunksize).get()
def starmap(self, func, iterable, chunksize=None):
'''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and `(a, b)` become `func(a, b)`.
'''
return self._map_async(func, iterable, starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None,
error_callback=None):
'''
Asynchronous version of `apply()` method.
'''
if self._state != RUN:
raise ValueError("Pool not running")
result = ApplyResult(self._cache, callback, error_callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
'''
Asynchronous version of `map()` method.
'''
return self._map_async(func, iterable, mapstar, chunksize, callback,
error_callback)
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((result._job, i, mapper, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_workers(pool):
thread = threading.current_thread()
# Keep maintaining workers until the cache gets drained, unless the pool
# is terminated.
while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
pool._maintain_pool()
time.sleep(0.1)
# send sentinel to stop workers
pool._taskqueue.put(None)
debug('worker handler exiting')
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._worker_handler.join()
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler._state = TERMINATE
task_handler._state = TERMINATE
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
debug('joining worker handler')
if threading.current_thread() is not worker_handler:
worker_handler.join()
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
if threading.current_thread() is not task_handler:
task_handler.join()
debug('joining result handler')
if threading.current_thread() is not result_handler:
result_handler.join()
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback, error_callback):
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._error_callback = error_callback
cache[self._job] = self
def ready(self):
return self._event.is_set()
def successful(self):
assert self.ready()
return self._success
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
if self._error_callback and not self._success:
self._error_callback(self._value)
self._event.set()
del self._cache[self._job]
AsyncResult = ApplyResult # create alias -- see #17805
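# Typical flow: `res = pool.apply_async(func, args)` hands back an ApplyResult;
# `res.get(timeout)` blocks on the internal event until Pool._handle_results
# delivers the worker's (success, value) pair via ApplyResult._set().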
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(self, cache, callback,
error_callback=error_callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
del self._cache[self._job]
self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = queue.Queue()
self._outqueue = queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
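# A minimal usage sketch for the Pool above (an illustration, not part of the
# original module). With the standard multiprocessing package this would be:
#
#     from multiprocessing import Pool
#
#     def square(x):
#         return x * x
#
#     if __name__ == '__main__':
#         with Pool(processes=4) as p:
#             print(p.map(square, range(10)))          # [0, 1, 4, ..., 81]
#             print(p.starmap(pow, [(2, 3), (3, 2)]))  # [8, 9]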
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflow_v2.services.conversation_profiles import pagers
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import conversation_profile
from google.cloud.dialogflow_v2.types import (
conversation_profile as gcd_conversation_profile,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ConversationProfilesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ConversationProfilesGrpcTransport
from .transports.grpc_asyncio import ConversationProfilesGrpcAsyncIOTransport
class ConversationProfilesClientMeta(type):
"""Metaclass for the ConversationProfiles client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversationProfilesTransport]]
_transport_registry["grpc"] = ConversationProfilesGrpcTransport
_transport_registry["grpc_asyncio"] = ConversationProfilesGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ConversationProfilesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConversationProfilesClient(metaclass=ConversationProfilesClientMeta):
"""Service for managing
[ConversationProfiles][google.cloud.dialogflow.v2.ConversationProfile].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationProfilesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationProfilesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversationProfilesTransport:
"""Returns the transport used by the client instance.
Returns:
ConversationProfilesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def agent_path(project: str,) -> str:
"""Returns a fully-qualified agent string."""
return "projects/{project}/agent".format(project=project,)
@staticmethod
def parse_agent_path(path: str) -> Dict[str, str]:
"""Parses a agent path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/agent$", path)
return m.groupdict() if m else {}
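    # For example, agent_path("my-project") returns "projects/my-project/agent",
    # and parse_agent_path("projects/my-project/agent") returns {"project": "my-project"}.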
@staticmethod
def conversation_model_path(
project: str, location: str, conversation_model: str,
) -> str:
"""Returns a fully-qualified conversation_model string."""
return "projects/{project}/locations/{location}/conversationModels/{conversation_model}".format(
project=project, location=location, conversation_model=conversation_model,
)
@staticmethod
def parse_conversation_model_path(path: str) -> Dict[str, str]:
"""Parses a conversation_model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/conversationModels/(?P<conversation_model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def conversation_profile_path(project: str, conversation_profile: str,) -> str:
"""Returns a fully-qualified conversation_profile string."""
return "projects/{project}/conversationProfiles/{conversation_profile}".format(
project=project, conversation_profile=conversation_profile,
)
@staticmethod
def parse_conversation_profile_path(path: str) -> Dict[str, str]:
"""Parses a conversation_profile path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/conversationProfiles/(?P<conversation_profile>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def cx_security_settings_path(
project: str, location: str, security_settings: str,
) -> str:
"""Returns a fully-qualified cx_security_settings string."""
return "projects/{project}/locations/{location}/securitySettings/{security_settings}".format(
project=project, location=location, security_settings=security_settings,
)
@staticmethod
def parse_cx_security_settings_path(path: str) -> Dict[str, str]:
"""Parses a cx_security_settings path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/securitySettings/(?P<security_settings>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def document_path(project: str, knowledge_base: str, document: str,) -> str:
"""Returns a fully-qualified document string."""
return "projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}".format(
project=project, knowledge_base=knowledge_base, document=document,
)
@staticmethod
def parse_document_path(path: str) -> Dict[str, str]:
"""Parses a document path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/knowledgeBases/(?P<knowledge_base>.+?)/documents/(?P<document>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def knowledge_base_path(project: str, knowledge_base: str,) -> str:
"""Returns a fully-qualified knowledge_base string."""
return "projects/{project}/knowledgeBases/{knowledge_base}".format(
project=project, knowledge_base=knowledge_base,
)
@staticmethod
def parse_knowledge_base_path(path: str) -> Dict[str, str]:
"""Parses a knowledge_base path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/knowledgeBases/(?P<knowledge_base>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use it.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
        else use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
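    # Usage sketch (the values returned depend on GOOGLE_API_USE_CLIENT_CERTIFICATE
    # and GOOGLE_API_USE_MTLS_ENDPOINT in the environment):
    #
    #     endpoint, cert_source = ConversationProfilesClient.get_mtls_endpoint_and_cert_source()
    #     # -> ("dialogflow.googleapis.com", None) when mTLS is not configured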
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ConversationProfilesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the conversation profiles client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ConversationProfilesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConversationProfilesTransport):
# transport is a ConversationProfilesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_conversation_profiles(
self,
request: Union[
conversation_profile.ListConversationProfilesRequest, dict
] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConversationProfilesPager:
r"""Returns the list of all conversation profiles in the
specified project.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_list_conversation_profiles():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
request = dialogflow_v2.ListConversationProfilesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_conversation_profiles(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.ListConversationProfilesRequest, dict]):
The request object. The request message for
[ConversationProfiles.ListConversationProfiles][google.cloud.dialogflow.v2.ConversationProfiles.ListConversationProfiles].
parent (str):
Required. The project to list all conversation profiles
from. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.conversation_profiles.pagers.ListConversationProfilesPager:
The response message for
[ConversationProfiles.ListConversationProfiles][google.cloud.dialogflow.v2.ConversationProfiles.ListConversationProfiles].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversation_profile.ListConversationProfilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversation_profile.ListConversationProfilesRequest
):
request = conversation_profile.ListConversationProfilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_conversation_profiles
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListConversationProfilesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_conversation_profile(
self,
request: Union[conversation_profile.GetConversationProfileRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversation_profile.ConversationProfile:
r"""Retrieves the specified conversation profile.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_get_conversation_profile():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
request = dialogflow_v2.GetConversationProfileRequest(
name="name_value",
)
# Make the request
response = client.get_conversation_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.GetConversationProfileRequest, dict]):
The request object. The request message for
[ConversationProfiles.GetConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.GetConversationProfile].
name (str):
Required. The resource name of the conversation profile.
Format:
``projects/<Project ID>/locations/<Location ID>/conversationProfiles/<Conversation Profile ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.ConversationProfile:
Defines the services to connect to
incoming Dialogflow conversations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversation_profile.GetConversationProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, conversation_profile.GetConversationProfileRequest):
request = conversation_profile.GetConversationProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_conversation_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_conversation_profile(
self,
request: Union[
gcd_conversation_profile.CreateConversationProfileRequest, dict
] = None,
*,
parent: str = None,
conversation_profile: gcd_conversation_profile.ConversationProfile = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_conversation_profile.ConversationProfile:
r"""Creates a conversation profile in the specified project.
[ConversationProfile.CreateTime][] and
[ConversationProfile.UpdateTime][] aren't populated in the
response. You can retrieve them via
[GetConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.GetConversationProfile]
API.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_create_conversation_profile():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
conversation_profile = dialogflow_v2.ConversationProfile()
conversation_profile.display_name = "display_name_value"
request = dialogflow_v2.CreateConversationProfileRequest(
parent="parent_value",
conversation_profile=conversation_profile,
)
# Make the request
response = client.create_conversation_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.CreateConversationProfileRequest, dict]):
The request object. The request message for
[ConversationProfiles.CreateConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.CreateConversationProfile].
parent (str):
Required. The project to create a conversation profile
for. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversation_profile (google.cloud.dialogflow_v2.types.ConversationProfile):
Required. The conversation profile to
create.
This corresponds to the ``conversation_profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.ConversationProfile:
Defines the services to connect to
incoming Dialogflow conversations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, conversation_profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_conversation_profile.CreateConversationProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_conversation_profile.CreateConversationProfileRequest
):
request = gcd_conversation_profile.CreateConversationProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if conversation_profile is not None:
request.conversation_profile = conversation_profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_conversation_profile
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_conversation_profile(
self,
request: Union[
gcd_conversation_profile.UpdateConversationProfileRequest, dict
] = None,
*,
conversation_profile: gcd_conversation_profile.ConversationProfile = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_conversation_profile.ConversationProfile:
r"""Updates the specified conversation profile.
[ConversationProfile.CreateTime][] and
[ConversationProfile.UpdateTime][] aren't populated in the
response. You can retrieve them via
[GetConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.GetConversationProfile]
API.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_update_conversation_profile():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
conversation_profile = dialogflow_v2.ConversationProfile()
conversation_profile.display_name = "display_name_value"
request = dialogflow_v2.UpdateConversationProfileRequest(
conversation_profile=conversation_profile,
)
# Make the request
response = client.update_conversation_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.UpdateConversationProfileRequest, dict]):
The request object. The request message for
[ConversationProfiles.UpdateConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.UpdateConversationProfile].
conversation_profile (google.cloud.dialogflow_v2.types.ConversationProfile):
Required. The conversation profile to
update.
This corresponds to the ``conversation_profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which
fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.ConversationProfile:
Defines the services to connect to
incoming Dialogflow conversations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([conversation_profile, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_conversation_profile.UpdateConversationProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_conversation_profile.UpdateConversationProfileRequest
):
request = gcd_conversation_profile.UpdateConversationProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if conversation_profile is not None:
request.conversation_profile = conversation_profile
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_conversation_profile
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("conversation_profile.name", request.conversation_profile.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_conversation_profile(
self,
request: Union[
conversation_profile.DeleteConversationProfileRequest, dict
] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified conversation profile.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_delete_conversation_profile():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteConversationProfileRequest(
name="name_value",
)
# Make the request
client.delete_conversation_profile(request=request)
Args:
request (Union[google.cloud.dialogflow_v2.types.DeleteConversationProfileRequest, dict]):
The request object. The request message for
[ConversationProfiles.DeleteConversationProfile][google.cloud.dialogflow.v2.ConversationProfiles.DeleteConversationProfile].
This operation fails if the conversation profile is
still referenced from a phone number.
name (str):
Required. The name of the conversation profile to
delete. Format:
``projects/<Project ID>/locations/<Location ID>/conversationProfiles/<Conversation Profile ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversation_profile.DeleteConversationProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversation_profile.DeleteConversationProfileRequest
):
request = conversation_profile.DeleteConversationProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_conversation_profile
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def set_suggestion_feature_config(
self,
request: Union[
gcd_conversation_profile.SetSuggestionFeatureConfigRequest, dict
] = None,
*,
conversation_profile: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Adds or updates a suggestion feature in a conversation profile.
If the conversation profile contains the type of suggestion
feature for the participant role, it will update it. Otherwise
it will insert the suggestion feature.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/es/docs/how/long-running-operations>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[SetSuggestionFeatureConfigOperationMetadata][google.cloud.dialogflow.v2.SetSuggestionFeatureConfigOperationMetadata]
- ``response``:
[ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile]
        If a long-running operation to add or update the suggestion
        feature config for the same conversation profile, participant
        role and suggestion feature type already exists, cancel that
        existing operation before sending such a request; otherwise the
        request will be rejected.
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_set_suggestion_feature_config():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
request = dialogflow_v2.SetSuggestionFeatureConfigRequest(
conversation_profile="conversation_profile_value",
participant_role="END_USER",
)
# Make the request
operation = client.set_suggestion_feature_config(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.SetSuggestionFeatureConfigRequest, dict]):
The request object. The request message for
[ConversationProfiles.SetSuggestionFeature][].
conversation_profile (str):
Required. The Conversation Profile to add or update the
suggestion feature config. Format:
``projects/<Project ID>/locations/<Location ID>/conversationProfiles/<Conversation Profile ID>``.
This corresponds to the ``conversation_profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflow_v2.types.ConversationProfile`
Defines the services to connect to incoming Dialogflow
conversations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([conversation_profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_conversation_profile.SetSuggestionFeatureConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_conversation_profile.SetSuggestionFeatureConfigRequest
):
request = gcd_conversation_profile.SetSuggestionFeatureConfigRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if conversation_profile is not None:
request.conversation_profile = conversation_profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.set_suggestion_feature_config
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("conversation_profile", request.conversation_profile),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcd_conversation_profile.ConversationProfile,
metadata_type=gcd_conversation_profile.SetSuggestionFeatureConfigOperationMetadata,
)
# Done; return the response.
return response
def clear_suggestion_feature_config(
self,
request: Union[
gcd_conversation_profile.ClearSuggestionFeatureConfigRequest, dict
] = None,
*,
conversation_profile: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Clears a suggestion feature from a conversation profile for the
given participant role.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/es/docs/how/long-running-operations>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[ClearSuggestionFeatureConfigOperationMetadata][google.cloud.dialogflow.v2.ClearSuggestionFeatureConfigOperationMetadata]
- ``response``:
[ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile]
.. code-block:: python
from google.cloud import dialogflow_v2
def sample_clear_suggestion_feature_config():
# Create a client
client = dialogflow_v2.ConversationProfilesClient()
# Initialize request argument(s)
request = dialogflow_v2.ClearSuggestionFeatureConfigRequest(
conversation_profile="conversation_profile_value",
participant_role="END_USER",
suggestion_feature_type="SMART_REPLY",
)
# Make the request
operation = client.clear_suggestion_feature_config(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflow_v2.types.ClearSuggestionFeatureConfigRequest, dict]):
The request object. The request message for
[ConversationProfiles.ClearFeature][].
conversation_profile (str):
                Required. The Conversation Profile to clear the
                suggestion feature config from. Format:
``projects/<Project ID>/locations/<Location ID>/conversationProfiles/<Conversation Profile ID>``.
This corresponds to the ``conversation_profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflow_v2.types.ConversationProfile`
Defines the services to connect to incoming Dialogflow
conversations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([conversation_profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_conversation_profile.ClearSuggestionFeatureConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_conversation_profile.ClearSuggestionFeatureConfigRequest
):
request = gcd_conversation_profile.ClearSuggestionFeatureConfigRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if conversation_profile is not None:
request.conversation_profile = conversation_profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.clear_suggestion_feature_config
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("conversation_profile", request.conversation_profile),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcd_conversation_profile.ConversationProfile,
metadata_type=gcd_conversation_profile.ClearSuggestionFeatureConfigOperationMetadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ConversationProfilesClient",)
|
|
# Copyright 2012 OpenStack Foundation
# Copyright (c) 2013 Zelin.io
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SheepDog Volume Driver.
"""
import errno
import eventlet
import io
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
sheepdog_opts = [
cfg.StrOpt('sheepdog_store_address',
default='127.0.0.1',
help=('IP address of sheep daemon.')),
cfg.IntOpt('sheepdog_store_port',
min=1, max=65535,
default=7000,
help=('Port of sheep daemon.'))
]
CONF = cfg.CONF
CONF.import_opt("image_conversion_dir", "cinder.image.image_utils")
CONF.register_opts(sheepdog_opts)
class SheepdogClient(object):
"""Sheepdog command executor."""
DOG_RESP_CONNECTION_ERROR = 'failed to connect to'
DOG_RESP_CLUSTER_RUNNING = 'Cluster status: running'
DOG_RESP_CLUSTER_NOT_FORMATTED = ('Cluster status: '
'Waiting for cluster to be formatted')
DOG_RESP_CLUSTER_WAITING = ('Cluster status: '
'Waiting for other nodes to join cluster')
DOG_RESP_VDI_ALREADY_EXISTS = ': VDI exists already'
DOG_RESP_VDI_NOT_FOUND = ': No VDI found'
def __init__(self, addr, port):
self.addr = addr
self.port = port
def _run_dog(self, command, subcommand, *params):
cmd = ('env', 'LC_ALL=C', 'LANG=C', 'dog', command, subcommand,
'-a', self.addr, '-p', str(self.port)) + params
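        # Illustrative example (addr/port are whatever this client was
        # constructed with; 127.0.0.1:7000 are the config defaults):
        # check_cluster_status() ends up running
        #   env LC_ALL=C LANG=C dog cluster info -a 127.0.0.1 -p 7000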
try:
return utils.execute(*cmd)
except OSError as e:
with excutils.save_and_reraise_exception():
if e.errno == errno.ENOENT:
msg = _LE('Sheepdog is not installed. '
'OSError: command is %s.')
else:
msg = _LE('OSError: command is %s.')
LOG.error(msg, cmd)
except processutils.ProcessExecutionError as e:
raise exception.SheepdogCmdError(
cmd=e.cmd,
exit_code=e.exit_code,
stdout=e.stdout.replace('\n', '\\n'),
stderr=e.stderr.replace('\n', '\\n'))
def check_cluster_status(self):
try:
(_stdout, _stderr) = self._run_dog('cluster', 'info')
except exception.SheepdogCmdError as e:
cmd = e.kwargs['cmd']
_stderr = e.kwargs['stderr']
with excutils.save_and_reraise_exception():
if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
msg = _LE('Failed to connect to sheep daemon. '
'addr: %(addr)s, port: %(port)s')
LOG.error(msg, {'addr': self.addr, 'port': self.port})
else:
                    LOG.error(_LE('Failed to check cluster status. '
                                  '(command: %s)'), cmd)
if _stdout.startswith(self.DOG_RESP_CLUSTER_RUNNING):
LOG.debug('Sheepdog cluster is running.')
return
reason = _('Invalid sheepdog cluster status.')
if _stdout.startswith(self.DOG_RESP_CLUSTER_NOT_FORMATTED):
reason = _('Cluster is not formatted. '
'You should probably perform "dog cluster format".')
elif _stdout.startswith(self.DOG_RESP_CLUSTER_WAITING):
reason = _('Waiting for all nodes to join cluster. '
'Ensure all sheep daemons are running.')
raise exception.SheepdogError(reason=reason)
def create(self, vdiname, size):
try:
self._run_dog('vdi', 'create', vdiname, '%sG' % size)
except exception.SheepdogCmdError as e:
_stderr = e.kwargs['stderr']
with excutils.save_and_reraise_exception():
if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
LOG.error(_LE("Failed to connect to sheep daemon. "
"addr: %(addr)s, port: %(port)s"),
{'addr': self.addr, 'port': self.port})
elif _stderr.rstrip('\\n').endswith(
self.DOG_RESP_VDI_ALREADY_EXISTS):
LOG.error(_LE('Volume already exists. %s'), vdiname)
else:
LOG.error(_LE('Failed to create volume. %s'), vdiname)
def delete(self, vdiname):
try:
(_stdout, _stderr) = self._run_dog('vdi', 'delete', vdiname)
if _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND):
LOG.warning(_LW('Volume not found. %s'), vdiname)
elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
                # NOTE(tishizaki)
                # The dog command does not return an error code even when
                # it cannot connect to the sheep process; that is a
                # Sheepdog bug. To work around it, we need to check stderr.
                # Even after Sheepdog is fixed, this check is still needed
                # for users running old Sheepdog versions.
reason = (_('Failed to connect to sheep daemon. '
'addr: %(addr)s, port: %(port)s'),
{'addr': self.addr, 'port': self.port})
raise exception.SheepdogError(reason=reason)
except exception.SheepdogCmdError as e:
_stderr = e.kwargs['stderr']
with excutils.save_and_reraise_exception():
if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
LOG.error(_LE('Failed to connect to sheep daemon. '
'addr: %(addr)s, port: %(port)s'),
{'addr': self.addr, 'port': self.port})
else:
LOG.error(_LE('Failed to delete volume. %s'), vdiname)
class SheepdogIOWrapper(io.RawIOBase):
"""File-like object with Sheepdog backend."""
def __init__(self, volume, snapshot_name=None):
self._vdiname = volume['name']
self._snapshot_name = snapshot_name
self._offset = 0
# SheepdogIOWrapper instance becomes invalid if a write error occurs.
self._valid = True
def _execute(self, cmd, data=None):
try:
# NOTE(yamada-h): processutils.execute causes busy waiting
# under eventlet.
# To avoid wasting CPU resources, it should not be used for
# the command which takes long time to execute.
# For workaround, we replace a subprocess module with
# the original one while only executing a read/write command.
_processutils_subprocess = processutils.subprocess
processutils.subprocess = eventlet.patcher.original('subprocess')
return processutils.execute(*cmd, process_input=data)[0]
except (processutils.ProcessExecutionError, OSError):
self._valid = False
msg = _('Sheepdog I/O Error, command was: "%s".') % ' '.join(cmd)
raise exception.VolumeDriverException(message=msg)
finally:
processutils.subprocess = _processutils_subprocess
def read(self, length=None):
if not self._valid:
msg = _('An error occurred while reading volume "%s".'
) % self._vdiname
raise exception.VolumeDriverException(message=msg)
cmd = ['dog', 'vdi', 'read']
if self._snapshot_name:
cmd.extend(('-s', self._snapshot_name))
cmd.extend((self._vdiname, self._offset))
if length:
cmd.append(length)
data = self._execute(cmd)
self._offset += len(data)
return data
def write(self, data):
if not self._valid:
msg = _('An error occurred while writing to volume "%s".'
) % self._vdiname
raise exception.VolumeDriverException(message=msg)
length = len(data)
cmd = ('dog', 'vdi', 'write', self._vdiname, self._offset, length)
self._execute(cmd, data)
self._offset += length
return length
def seek(self, offset, whence=0):
if not self._valid:
            msg = _('An error occurred while seeking in volume "%s".'
) % self._vdiname
raise exception.VolumeDriverException(message=msg)
if whence == 0:
# SEEK_SET or 0 - start of the stream (the default);
# offset should be zero or positive
new_offset = offset
elif whence == 1:
# SEEK_CUR or 1 - current stream position; offset may be negative
new_offset = self._offset + offset
else:
# SEEK_END or 2 - end of the stream; offset is usually negative
# TODO(yamada-h): Support SEEK_END
raise IOError(_("Invalid argument - whence=%s not supported.") %
whence)
if new_offset < 0:
raise IOError(_("Invalid argument - negative seek offset."))
self._offset = new_offset
def tell(self):
return self._offset
def flush(self):
pass
def fileno(self):
"""Sheepdog does not have support for fileno so we raise IOError.
        Raising IOError is the recommended way to notify the caller that the
        interface is not supported - see
        http://docs.python.org/2/library/io.html#io.IOBase
"""
raise IOError(_("fileno is not supported by SheepdogIOWrapper"))
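# A minimal usage sketch for SheepdogIOWrapper (illustrative; the volume
# dict below is a hypothetical stand-in for the object Cinder supplies):
#
#   wrapper = SheepdogIOWrapper({'name': 'volume-0001'},
#                               snapshot_name='tmp-snap-volume-0001')
#   chunk = wrapper.read(4096)  # shells out to `dog vdi read`
#   wrapper.seek(0)             # SEEK_SET; SEEK_END is not supported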
class SheepdogDriver(driver.VolumeDriver):
"""Executes commands relating to Sheepdog Volumes."""
VERSION = "1.0.0"
def __init__(self, *args, **kwargs):
super(SheepdogDriver, self).__init__(*args, **kwargs)
self.client = SheepdogClient(CONF.sheepdog_store_address,
CONF.sheepdog_store_port)
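        # Matches the summary line of `collie node info -r`, assumed to look
        # roughly like "Total <total-bytes> <used-bytes> ..."; only the
        # first two numbers are used.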
self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*')
self._stats = {}
def check_for_setup_error(self):
self.client.check_cluster_status()
def _is_cloneable(self, image_location, image_meta):
"""Check the image can be clone or not."""
if image_location is None:
return False
if not image_location.startswith("sheepdog:"):
LOG.debug("Image is not stored in sheepdog.")
return False
if image_meta['disk_format'] != 'raw':
LOG.debug("Image clone requires image format to be "
"'raw' but image %s(%s) is '%s'.",
image_location,
image_meta['id'],
image_meta['disk_format'])
return False
cloneable = False
        # check whether the image is stored in sheepdog
try:
# The image location would be like
# "sheepdog:192.168.10.2:7000:Alice"
(label, ip, port, name) = image_location.split(":", 3)
self._try_execute('collie', 'vdi', 'list', '--address', ip,
'--port', port, name)
cloneable = True
except processutils.ProcessExecutionError as e:
LOG.debug("Can not find vdi %(image)s: %(err)s",
{'image': name, 'err': e})
return cloneable
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image."""
image_location = image_location[0] if image_location else None
if not self._is_cloneable(image_location, image_meta):
return {}, False
# The image location would be like
# "sheepdog:192.168.10.2:7000:Alice"
(label, ip, port, name) = image_location.split(":", 3)
volume_ref = {'name': name, 'size': image_meta['size']}
self.create_cloned_volume(volume, volume_ref)
self._resize(volume)
vol_path = self.local_path(volume)
return {'provider_location': vol_path}, True
def create_cloned_volume(self, volume, src_vref):
"""Clone a sheepdog volume from another volume."""
snapshot_name = src_vref['name'] + '-temp-snapshot'
snapshot = {
'name': snapshot_name,
'volume_name': src_vref['name'],
'volume_size': src_vref['size'],
}
self.create_snapshot(snapshot)
try:
# Create volume
self.create_volume_from_snapshot(volume, snapshot)
except processutils.ProcessExecutionError:
msg = _('Failed to create cloned volume %s.') % volume['id']
LOG.error(msg)
raise exception.VolumeBackendAPIException(msg)
finally:
# Delete temp Snapshot
self.delete_snapshot(snapshot)
def create_volume(self, volume):
"""Create a sheepdog volume."""
self.client.create(volume.name, volume.size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a sheepdog volume from a snapshot."""
self._try_execute('qemu-img', 'create', '-b',
"sheepdog:%s:%s" % (snapshot['volume_name'],
snapshot['name']),
"sheepdog:%s" % volume['name'],
'%sG' % volume['size'])
def delete_volume(self, volume):
"""Delete a logical volume."""
self.client.delete(volume.name)
def _resize(self, volume, size=None):
if not size:
size = int(volume['size']) * units.Gi
self._try_execute('collie', 'vdi', 'resize',
volume['name'], size)
def copy_image_to_volume(self, context, volume, image_service, image_id):
with image_utils.temporary_file() as tmp:
# (wenhao): we don't need to convert to raw for sheepdog.
image_utils.fetch_verify_image(context, image_service,
image_id, tmp)
# remove the image created by import before this function.
# see volume/drivers/manager.py:_create_volume
self.client.delete(volume.name)
# convert and store into sheepdog
image_utils.convert_image(tmp, 'sheepdog:%s' % volume['name'],
'raw')
self._resize(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_id = image_meta['id']
with image_utils.temporary_file() as tmp:
# image_utils.convert_image doesn't support "sheepdog:" source,
# so we use the qemu-img directly.
# Sheepdog volume is always raw-formatted.
cmd = ('qemu-img',
'convert',
'-f', 'raw',
'-t', 'none',
'-O', 'raw',
'sheepdog:%s' % volume['name'],
tmp)
self._try_execute(*cmd)
with open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
def create_snapshot(self, snapshot):
"""Create a sheepdog snapshot."""
self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
"sheepdog:%s" % snapshot['volume_name'])
def delete_snapshot(self, snapshot):
"""Delete a sheepdog snapshot."""
self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
'-s', snapshot['name'])
def local_path(self, volume):
return "sheepdog:%s" % volume['name']
def ensure_export(self, context, volume):
"""Safely and synchronously recreate an export for a logical volume."""
pass
def create_export(self, context, volume, connector):
"""Export a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'sheepdog',
'data': {
'name': volume['name']
}
}
def terminate_connection(self, volume, connector, **kwargs):
pass
def _update_volume_stats(self):
stats = {}
backend_name = "sheepdog"
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
stats["volume_backend_name"] = backend_name or 'sheepdog'
stats['vendor_name'] = 'Open Source'
stats['driver_version'] = self.VERSION
stats['storage_protocol'] = 'sheepdog'
stats['total_capacity_gb'] = 'unknown'
stats['free_capacity_gb'] = 'unknown'
stats['reserved_percentage'] = 0
stats['QoS_support'] = False
try:
stdout, _err = self._execute('collie', 'node', 'info', '-r')
m = self.stats_pattern.match(stdout)
total = float(m.group(1))
used = float(m.group(2))
stats['total_capacity_gb'] = total / units.Gi
stats['free_capacity_gb'] = (total - used) / units.Gi
except processutils.ProcessExecutionError:
LOG.exception(_LE('error refreshing volume stats'))
self._stats = stats
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an Existing Volume."""
old_size = volume['size']
try:
size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
'%(volname)s') % {'volname': volume['name']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
temp_snapshot = {'volume_name': volume['name'],
'name': 'tmp-snap-%s' % volume['name']}
# NOTE(tishizaki): If previous backup_volume operation has failed,
# a temporary snapshot for previous operation may exist.
# So, the old snapshot must be deleted before backup_volume.
# Sheepdog 0.9 or later 'delete_snapshot' operation
# is done successfully, although target snapshot does not exist.
# However, sheepdog 0.8 or before 'delete_snapshot' operation
# is failed, and raise ProcessExecutionError when target snapshot
# does not exist.
try:
self.delete_snapshot(temp_snapshot)
except (processutils.ProcessExecutionError):
pass
try:
self.create_snapshot(temp_snapshot)
except (processutils.ProcessExecutionError, OSError):
msg = (_('Failed to create a temporary snapshot for volume %s.')
% volume['id'])
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
sheepdog_fd = SheepdogIOWrapper(volume, temp_snapshot['name'])
backup_service.backup(backup, sheepdog_fd)
finally:
self.delete_snapshot(temp_snapshot)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
sheepdog_fd = SheepdogIOWrapper(volume)
backup_service.restore(backup, volume['id'], sheepdog_fd)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides access functions for the app identity service."""
import os
import time
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api.app_identity import app_identity_service_pb
from google.appengine.runtime import apiproxy_errors
__all__ = ['BackendDeadlineExceeded',
'BlobSizeTooLarge',
'InternalError',
'InvalidScope',
'NotAllowed',
'OperationNotImplemented',
'Error',
'create_rpc',
'make_sign_blob_call',
'make_get_public_certificates_call',
'make_get_service_account_name_call',
'sign_blob',
'get_public_certificates',
'PublicCertificate',
'get_service_account_name',
'get_application_id',
'get_default_version_hostname',
'get_access_token',
'get_access_token_uncached',
'make_get_access_token_call',
'get_default_gcs_bucket_name',
'make_get_default_gcs_bucket_name_call',
]
_APP_IDENTITY_SERVICE_NAME = 'app_identity_service'
_SIGN_FOR_APP_METHOD_NAME = 'SignForApp'
_GET_CERTS_METHOD_NAME = 'GetPublicCertificatesForApp'
_GET_SERVICE_ACCOUNT_NAME_METHOD_NAME = 'GetServiceAccountName'
_GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME = 'GetDefaultGcsBucketName'
_GET_ACCESS_TOKEN_METHOD_NAME = 'GetAccessToken'
_PARTITION_SEPARATOR = '~'
_DOMAIN_SEPARATOR = ':'
_MEMCACHE_KEY_PREFIX = '_ah_app_identity_'
_MEMCACHE_NAMESPACE = '_ah_'
_TOKEN_EXPIRY_SAFETY_MARGIN = 300
_MAX_TOKEN_CACHE_SIZE = 100
_MAX_RANDOM_EXPIRY_DELTA = 60
_access_token_cache = {}
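# Per-process pseudo-random delta in [0, _MAX_RANDOM_EXPIRY_DELTA) seconds,
# subtracted from cached token expiry so that concurrent processes do not
# all refresh their tokens at the same moment.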
_random_cache_expiry_delta = (
hash(time.time()) % (_MAX_RANDOM_EXPIRY_DELTA * 1000) / 1000.0)
class Error(Exception):
"""Base error type."""
class BackendDeadlineExceeded(Error):
"""Communication to backend service timed-out."""
class BlobSizeTooLarge(Error):
"""Size of blob to sign is larger than the allowed limit."""
class InternalError(Error):
"""Unspecified internal failure."""
class InvalidScope(Error):
"""Invalid scope."""
class NotAllowed(Error):
"""The operation is not allowed."""
class OperationNotImplemented(Error):
"""The operation is not implemented for the service account."""
def _to_app_identity_error(error):
"""Translate an application error to an external Error, if possible.
Args:
error: An ApplicationError to translate.
Returns:
error: app identity API specific error message.
"""
error_map = {
app_identity_service_pb.AppIdentityServiceError.NOT_A_VALID_APP:
InternalError,
app_identity_service_pb.AppIdentityServiceError.DEADLINE_EXCEEDED:
BackendDeadlineExceeded,
app_identity_service_pb.AppIdentityServiceError.BLOB_TOO_LARGE:
BlobSizeTooLarge,
app_identity_service_pb.AppIdentityServiceError.UNKNOWN_ERROR:
InternalError,
app_identity_service_pb.AppIdentityServiceError.UNKNOWN_SCOPE:
InvalidScope,
app_identity_service_pb.AppIdentityServiceError.NOT_ALLOWED:
NotAllowed,
app_identity_service_pb.AppIdentityServiceError.NOT_IMPLEMENTED:
OperationNotImplemented,
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return InternalError('%s: %s' %
(error.application_error, error.error_detail))
class PublicCertificate(object):
"""Info about public certificate.
Attributes:
key_name: name of the certificate.
    x509_certificate_pem: x509 certificate in PEM format.
"""
def __init__(self, key_name, x509_certificate_pem):
self.key_name = key_name
self.x509_certificate_pem = x509_certificate_pem
def create_rpc(deadline=None, callback=None):
"""Creates an RPC object for use with the App identity API.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
callback: Optional callable to invoke on completion.
Returns:
An apiproxy_stub_map.UserRPC object specialized for this service.
"""
return apiproxy_stub_map.UserRPC(_APP_IDENTITY_SERVICE_NAME,
deadline, callback)
def make_sign_blob_call(rpc, bytes_to_sign):
"""Executes the RPC call to sign a blob.
Args:
rpc: a UserRPC instance.
bytes_to_sign: blob that needs to be signed.
Returns:
A tuple that contains the signing key name and the signature.
Raises:
TypeError: when bytes_to_sign is not a str.
"""
if not isinstance(bytes_to_sign, str):
raise TypeError('bytes_to_sign must be str: %s'
% bytes_to_sign)
request = app_identity_service_pb.SignForAppRequest()
request.set_bytes_to_sign(bytes_to_sign)
response = app_identity_service_pb.SignForAppResponse()
def signing_for_app_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A tuple that contains signing key name and signature.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _SIGN_FOR_APP_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return (response.key_name(), response.signature_bytes())
rpc.make_call(_SIGN_FOR_APP_METHOD_NAME, request,
response, signing_for_app_result)
def make_get_public_certificates_call(rpc):
"""Executes the RPC call to get a list of public certificates.
Args:
rpc: a UserRPC instance.
Returns:
    A list of PublicCertificate objects.
"""
request = app_identity_service_pb.GetPublicCertificateForAppRequest()
response = app_identity_service_pb.GetPublicCertificateForAppResponse()
def get_certs_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
      A list of PublicCertificate objects.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_CERTS_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
result = []
for cert in response.public_certificate_list_list():
result.append(PublicCertificate(
cert.key_name(), cert.x509_certificate_pem()))
return result
rpc.make_call(_GET_CERTS_METHOD_NAME, request, response, get_certs_result)
def make_get_service_account_name_call(rpc):
"""Get service account name of the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Service account name of the app.
"""
request = app_identity_service_pb.GetServiceAccountNameRequest()
response = app_identity_service_pb.GetServiceAccountNameResponse()
if rpc.deadline is not None:
request.set_deadline(rpc.deadline)
def get_service_account_name_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A string which is service account name of the app.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_SERVICE_ACCOUNT_NAME_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return response.service_account_name()
rpc.make_call(_GET_SERVICE_ACCOUNT_NAME_METHOD_NAME, request,
response, get_service_account_name_result)
def make_get_default_gcs_bucket_name_call(rpc):
"""Get default google storage bucket name for the app.
Args:
rpc: A UserRPC object.
Returns:
Default Google Storage Bucket name of the app.
"""
request = app_identity_service_pb.GetDefaultGcsBucketNameRequest()
response = app_identity_service_pb.GetDefaultGcsBucketNameResponse()
if rpc.deadline is not None:
request.set_deadline(rpc.deadline)
def get_default_gcs_bucket_name_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A string which is the name of the app's default google storage bucket.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME, (
repr(rpc.method))
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
if response.has_default_gcs_bucket_name():
return response.default_gcs_bucket_name()
else:
return None
rpc.make_call(_GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME, request,
response, get_default_gcs_bucket_name_result)
def sign_blob(bytes_to_sign, deadline=None):
"""Signs a blob.
Args:
bytes_to_sign: blob that needs to be signed.
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Tuple, signing key name and signature.
"""
rpc = create_rpc(deadline)
make_sign_blob_call(rpc, bytes_to_sign)
rpc.wait()
return rpc.get_result()
def get_public_certificates(deadline=None):
"""Get public certificates.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
    A list of PublicCertificate objects.
"""
rpc = create_rpc(deadline)
make_get_public_certificates_call(rpc)
rpc.wait()
return rpc.get_result()
def get_service_account_name(deadline=None):
"""Get service account name of the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Service account name of the app.
"""
rpc = create_rpc(deadline)
make_get_service_account_name_call(rpc)
rpc.wait()
return rpc.get_result()
def get_default_gcs_bucket_name(deadline=None):
"""Gets the default gs bucket name for the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Default bucket name for the app.
"""
rpc = create_rpc(deadline)
make_get_default_gcs_bucket_name_call(rpc)
rpc.wait()
return rpc.get_result()
def _ParseFullAppId(app_id):
"""Parse a full app id into partition, domain name and display app_id.
Args:
app_id: The full partitioned app id.
Returns:
A tuple (partition, domain_name, display_app_id). The partition
and domain name may be empty.
"""
partition = ''
psep = app_id.find(_PARTITION_SEPARATOR)
if psep > 0:
partition = app_id[:psep]
app_id = app_id[psep+1:]
domain_name = ''
dsep = app_id.find(_DOMAIN_SEPARATOR)
if dsep > 0:
domain_name = app_id[:dsep]
app_id = app_id[dsep+1:]
return partition, domain_name, app_id
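# For example (illustrative app ids):
#   _ParseFullAppId('s~example-app')      -> ('s', '', 'example-app')
#   _ParseFullAppId('example.com:my-app') -> ('', 'example.com', 'my-app')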
def get_application_id():
"""Get the application id of an app.
Returns:
The application id of the app.
"""
full_app_id = os.getenv('APPLICATION_ID')
_, domain_name, display_app_id = _ParseFullAppId(full_app_id)
if domain_name:
return '%s%s%s' % (domain_name, _DOMAIN_SEPARATOR, display_app_id)
return display_app_id
def get_default_version_hostname():
"""Get the standard hostname of the default version of the app.
For example if your application_id is my-app then the result might be
my-app.appspot.com.
Returns:
The standard hostname of the default version of the application.
"""
return os.getenv('DEFAULT_VERSION_HOSTNAME')
def make_get_access_token_call(rpc, scopes, service_account_id=None):
"""OAuth2 access token to act on behalf of the application (async, uncached).
Most developers should use get_access_token instead.
Args:
rpc: RPC object.
    scopes: The requested API scope string, or a list of strings.
    service_account_id: Optional service account id (int) or name (str)
      to act as, instead of the app's default service account.
Raises:
InvalidScope: if the scopes are unspecified or invalid.
"""
request = app_identity_service_pb.GetAccessTokenRequest()
if not scopes:
raise InvalidScope('No scopes specified.')
if isinstance(scopes, basestring):
request.add_scope(scopes)
else:
for scope in scopes:
request.add_scope(scope)
if service_account_id:
if isinstance(service_account_id, (int, long)):
request.set_service_account_id(service_account_id)
elif isinstance(service_account_id, basestring):
request.set_service_account_name(service_account_id)
else:
      raise TypeError('service_account_id must be an integer id or '
                      'string service account name')
response = app_identity_service_pb.GetAccessTokenResponse()
def get_access_token_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_ACCESS_TOKEN_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return response.access_token(), response.expiration_time()
rpc.make_call(_GET_ACCESS_TOKEN_METHOD_NAME, request,
response, get_access_token_result)
def get_access_token_uncached(scopes, deadline=None, service_account_id=None):
"""OAuth2 access token to act on behalf of the application (sync, uncached).
Most developers should use get_access_token instead.
Args:
    scopes: The requested API scope string, or a list of strings.
    deadline: Optional deadline in seconds for the operation; the default
      is a system-specific deadline (typically 5 seconds).
    service_account_id: Optional service account id (int) or name (str)
      to act as, instead of the app's default service account.
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
rpc = create_rpc(deadline)
make_get_access_token_call(rpc, scopes, service_account_id=service_account_id)
rpc.wait()
return rpc.get_result()
def get_access_token(scopes, service_account_id=None):
"""OAuth2 access token to act on behalf of the application, cached.
Generates and caches an OAuth2 access token for the service account for the
appengine application.
Each application has an associated Google account. This function returns
OAuth2 access token corresponding to the running app. Access tokens are safe
to cache and reuse until their expiry time as returned. This method will
do that using both an in-process cache and memcache.
Args:
    scopes: The requested API scope string, or a list of strings.
    service_account_id: Optional service account id (int) or name (str)
      to act as, instead of the app's default service account.
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
cache_key = _MEMCACHE_KEY_PREFIX + str(scopes)
if service_account_id:
cache_key += ',%s' % service_account_id
cached = _access_token_cache.get(cache_key)
if cached is not None:
access_token, expires_at = cached
safe_expiry = (expires_at - _TOKEN_EXPIRY_SAFETY_MARGIN -
_random_cache_expiry_delta)
if time.time() < safe_expiry:
return access_token, expires_at
memcache_value = memcache.get(cache_key, namespace=_MEMCACHE_NAMESPACE)
if memcache_value:
access_token, expires_at = memcache_value
else:
access_token, expires_at = get_access_token_uncached(
scopes, service_account_id=service_account_id)
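    # Expire the memcache entry before the in-process safety check would
    # reject it: subtract the safety margin, the maximum random delta,
    # and a small 10-second fudge factor.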
memcache_expiry = expires_at - _TOKEN_EXPIRY_SAFETY_MARGIN
memcache_expiry -= _MAX_RANDOM_EXPIRY_DELTA
memcache_expiry -= 10
memcache.add(cache_key, (access_token, expires_at),
memcache_expiry,
namespace=_MEMCACHE_NAMESPACE)
if len(_access_token_cache) >= _MAX_TOKEN_CACHE_SIZE:
_access_token_cache.clear()
_access_token_cache[cache_key] = (access_token, expires_at)
return access_token, expires_at
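# A minimal usage sketch (the scope below is the standard cloud-platform
# OAuth2 scope; anything else here is illustrative):
#
#   token, expires_at = get_access_token(
#       ['https://www.googleapis.com/auth/cloud-platform'])
#   # send as "Authorization: Bearer <token>" until expires_at.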
|
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import cookielib
import os
import random
import re
import zlib
import urllib
from urllib2 import parse_http_list as _parse_list_header
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return name
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
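# For example, unquote_header_value('"token"') returns 'token'; escaped
# backslashes and quotes inside the value are unescaped unless the value
# is a filename that starts like a UNC path.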
def header_expand(headers):
"""Returns an HTTP Header value string from a dictionary.
Example expansion::
{'text/x-dvi': {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}, 'text/x-c': {}}
# Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c
(('text/x-dvi', {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}), ('text/x-c', {}))
# Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c
"""
collector = []
if isinstance(headers, dict):
headers = headers.items()
elif isinstance(headers, basestring):
return headers
for i, (value, params) in enumerate(headers):
_params = []
for (p_k, p_v) in params.items():
_params.append('%s=%s' % (p_k, p_v))
collector.append(value)
collector.append('; ')
if len(params):
collector.append('; '.join(_params))
if not len(headers) == i+1:
collector.append(', ')
# Remove trailing separators.
if collector[-1] in (', ', '; '):
del collector[-1]
return ''.join(collector)
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for _, cookies in cj._cookies.items():
for _, cookies in cookies.items():
for cookie in cookies.values():
# print cookie
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def cookiejar_from_dict(cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
# return cookiejar if one was passed in
if isinstance(cookie_dict, cookielib.CookieJar):
return cookie_dict
# create cookiejar
cj = cookielib.CookieJar()
cj = add_dict_to_cookiejar(cj, cookie_dict)
return cj
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
for k, v in cookie_dict.items():
cookie = cookielib.Cookie(
version=0,
name=k,
value=v,
port=None,
port_specified=False,
domain='',
domain_specified=False,
domain_initial_dot=False,
path='/',
path_specified=True,
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False
)
# add cookie to cookiejar
cj.set_cookie(cookie)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
return charset_re.findall(content)
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def unicode_from_html(content):
"""Attempts to decode an HTML string into unicode.
If unsuccessful, the original content is returned.
"""
encodings = get_encodings_from_content(content)
for encoding in encodings:
try:
return unicode(content, encoding)
except (UnicodeError, TypeError):
pass
return content
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode('', final=True)
if rv:
yield rv
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return unicode(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return unicode(r.content, encoding, errors='replace')
except TypeError:
return r.content
def decode_gzip(content):
"""Return gzip-decoded string.
:param content: bytestring to gzip-decode.
"""
return zlib.decompress(content, 16 + zlib.MAX_WBITS)
def stream_decompress(iterator, mode='gzip'):
"""
Stream decodes an iterator over compressed data
:param iterator: An iterator over compressed data
:param mode: 'gzip' or 'deflate'
:return: An iterator over decompressed data
"""
if mode not in ['gzip', 'deflate']:
raise ValueError('stream_decompress mode must be gzip or deflate')
zlib_mode = 16 + zlib.MAX_WBITS if mode == 'gzip' else -zlib.MAX_WBITS
dec = zlib.decompressobj(zlib_mode)
try:
for chunk in iterator:
rv = dec.decompress(chunk)
if rv:
yield rv
except zlib.error:
# If there was an error decompressing, just return the raw chunk
yield chunk
# Continue to return the rest of the raw data
for chunk in iterator:
yield chunk
else:
# Make sure everything has been returned from the decompression object
buf = dec.decompress('')
rv = buf + dec.flush()
if rv:
yield rv
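# A minimal usage sketch (the chunk iterator is hypothetical; any iterable
# of compressed byte strings works):
#
#   gzipped = iter([part1, part2])
#   for data in stream_decompress(gzipped, mode='gzip'):
#       handle(data)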
def requote_path(path):
"""Re-quote the given URL path component.
This function passes the given path through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
parts = path.split("/")
parts = (urllib.quote(urllib.unquote(part), safe="") for part in parts)
return "/".join(parts)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
import smtplib
import email.feedparser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email import header
import tg
from paste.deploy.converters import asbool, asint, aslist
from formencode import validators as fev
from pylons import tmpl_context as c
from pylons import app_globals as g
from allura.lib.utils import ConfigProxy
from allura.lib import exceptions as exc
from allura.lib import helpers as h
log = logging.getLogger(__name__)
RE_MESSAGE_ID = re.compile(r'<(?:[^>]*/)?([^>]*)>')
config = ConfigProxy(
common_suffix='forgemail.domain',
return_path='forgemail.return_path')
EMAIL_VALIDATOR = fev.Email(not_empty=True)
def Header(text, *more_text):
'''Helper to make sure we encode headers properly'''
if isinstance(text, header.Header):
return text
# email.header.Header handles str vs unicode differently
# see
# http://docs.python.org/library/email.header.html#email.header.Header.append
if type(text) != unicode:
raise TypeError('This must be unicode: %r' % text)
head = header.Header(text)
for m in more_text:
if type(m) != unicode:
            raise TypeError('This must be unicode: %r' % m)
head.append(m)
return head
def AddrHeader(fromaddr):
'''Accepts any of:
Header() instance
foo@bar.com
"Foo Bar" <foo@bar.com>
'''
if isinstance(fromaddr, basestring) and ' <' in fromaddr:
name, addr = fromaddr.rsplit(' <', 1)
addr = '<' + addr # restore the char we just split off
addrheader = Header(name, addr)
if str(addrheader).startswith('=?'): # encoding escape chars
# then quoting the name is no longer necessary
name = name.strip('"')
addrheader = Header(name, addr)
else:
addrheader = Header(fromaddr)
return addrheader
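# For example, AddrHeader(u'"Foo Bar" <foo@bar.com>') splits the display
# name from the address, and drops the surrounding quotes if the name had
# to be RFC 2047 encoded.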
def is_autoreply(msg):
    '''Returns True if the message is an autoreply.
Detection based on suggestions from
https://github.com/opennorth/multi_mail/wiki/Detecting-autoresponders
'''
h = msg['headers']
return (
h.get('Auto-Submitted') == 'auto-replied'
or h.get('X-POST-MessageClass') == '9; Autoresponder'
or h.get('Delivered-To') == 'Autoresponder'
or h.get('X-FC-MachineGenerated') == 'true'
or h.get('X-AutoReply-From') is not None
or h.get('X-Autogenerated') in ['Forward', 'Group', 'Letter', 'Mirror', 'Redirect', 'Reply']
or h.get('X-Precedence') == 'auto_reply'
or h.get('Return-Path') == '<>'
)
def parse_address(addr):
userpart, domain = addr.split('@')
# remove common domain suffix
if not domain.endswith(config.common_suffix):
raise exc.AddressException, 'Unknown domain: ' + domain
domain = domain[:-len(config.common_suffix)]
path = '/'.join(reversed(domain.split('.')))
project, mount_point = h.find_project('/' + path)
if project is None:
raise exc.AddressException, 'Unknown project: ' + domain
if len(mount_point) != 1:
raise exc.AddressException, 'Unknown tool: ' + domain
with h.push_config(c, project=project):
app = project.app_instance(mount_point[0])
if not app:
raise exc.AddressException, 'Unknown tool: ' + domain
return userpart, project, app
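# For example (hypothetical address; assumes config.common_suffix is
# '.projects.example.com'): 'ticket-42@bugs.myproj.projects.example.com'
# reverses the remaining domain into the path '/myproj/bugs', i.e.
# project 'myproj' with the 'bugs' tool mounted; the returned userpart
# is 'ticket-42'.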
def parse_message(data):
# Parse the email to its constituent parts
parser = email.feedparser.FeedParser()
parser.feed(data)
msg = parser.close()
# Extract relevant data
result = {}
result['multipart'] = multipart = msg.is_multipart()
result['headers'] = dict(msg)
result['message_id'] = _parse_message_id(msg.get('Message-ID'))
result['in_reply_to'] = _parse_message_id(msg.get('In-Reply-To'))
result['references'] = _parse_message_id(msg.get('References'))
if result['message_id'] == []:
result['message_id'] = h.gen_message_id()
else:
result['message_id'] = result['message_id'][0]
if multipart:
result['parts'] = []
for part in msg.walk():
dpart = dict(
headers=dict(part),
message_id=result['message_id'],
in_reply_to=result['in_reply_to'],
references=result['references'],
content_type=part.get_content_type(),
filename=part.get_filename(None),
payload=part.get_payload(decode=True))
charset = part.get_content_charset()
if charset:
dpart['payload'] = dpart['payload'].decode(charset)
result['parts'].append(dpart)
else:
result['payload'] = msg.get_payload(decode=True)
charset = msg.get_content_charset()
if charset:
result['payload'] = result['payload'].decode(charset)
return result
def identify_sender(peer, email_address, headers, msg):
from allura import model as M
# Dumb ID -- just look for email address claimed by a particular user
addr = M.EmailAddress.query.get(
_id=M.EmailAddress.canonical(email_address))
if addr and addr.claimed_by_user_id:
return addr.claimed_by_user()
from_address = headers.get('From', '').strip()
if not from_address:
return M.User.anonymous()
addr = M.EmailAddress.query.get(_id=M.EmailAddress.canonical(from_address))
if addr and addr.claimed_by_user_id:
return addr.claimed_by_user()
return M.User.anonymous()
def encode_email_part(content, content_type):
try:
return MIMEText(content.encode('ascii'), content_type, 'ascii')
    except UnicodeError:
        return MIMEText(content.encode('utf-8'), content_type, 'utf-8')
def make_multipart_message(*parts):
msg = MIMEMultipart('related')
msg.preamble = 'This is a multi-part message in MIME format.'
alt = MIMEMultipart('alternative')
msg.attach(alt)
for part in parts:
alt.attach(part)
return msg
def _parse_message_id(msgid):
if msgid is None:
return []
return [mo.group(1)
for mo in RE_MESSAGE_ID.finditer(msgid)]
def _parse_smtp_addr(addr):
addr = str(addr)
addrs = _parse_message_id(addr)
if addrs and addrs[0]:
return addrs[0]
if '@' in addr:
return addr
return g.noreply
def isvalid(addr):
    '''Return True if addr is a (possibly) valid email address, False
    otherwise'''
try:
EMAIL_VALIDATOR.to_python(addr, None)
return True
except fev.Invalid:
return False
class SMTPClient(object):
def __init__(self):
self._client = None
def sendmail(
self, addrs, fromaddr, reply_to, subject, message_id, in_reply_to, message,
sender=None, references=None, cc=None, to=None):
if not addrs:
return
if to:
message['To'] = AddrHeader(h.really_unicode(to))
else:
message['To'] = AddrHeader(reply_to)
message['From'] = AddrHeader(fromaddr)
message['Reply-To'] = AddrHeader(reply_to)
message['Subject'] = Header(subject)
message['Message-ID'] = Header('<' + message_id + u'>')
if sender:
message['Sender'] = AddrHeader(sender)
if cc:
message['CC'] = AddrHeader(cc)
addrs.append(cc)
if in_reply_to:
if not isinstance(in_reply_to, basestring):
raise TypeError('Only strings are supported now, not lists')
message['In-Reply-To'] = Header(u'<%s>' % in_reply_to)
if not references:
message['References'] = message['In-Reply-To']
if references:
references = [u'<%s>' % r for r in aslist(references)]
message['References'] = Header(*references)
content = message.as_string()
smtp_addrs = map(_parse_smtp_addr, addrs)
smtp_addrs = [a for a in smtp_addrs if isvalid(a)]
if not smtp_addrs:
log.warning('No valid addrs in %s, so not sending mail',
map(unicode, addrs))
return
try:
self._client.sendmail(
config.return_path,
smtp_addrs,
content)
        except Exception:
            # reconnect and retry once (also handles the first send, before any connection exists)
self._connect()
self._client.sendmail(
config.return_path,
smtp_addrs,
content)
def _connect(self):
        # default ports: 465 for SMTPS (implicit SSL), 25 for plain SMTP
        if asbool(tg.config.get('smtp_ssl', False)):
            smtp_client = smtplib.SMTP_SSL(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 465)),
                timeout=float(tg.config.get('smtp_timeout', 10)),
            )
        else:
            smtp_client = smtplib.SMTP(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 25)),
                timeout=float(tg.config.get('smtp_timeout', 10)),
            )
if tg.config.get('smtp_user', None):
smtp_client.login(tg.config['smtp_user'],
tg.config['smtp_password'])
if asbool(tg.config.get('smtp_tls', False)):
smtp_client.starttls()
self._client = smtp_client
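# A minimal usage sketch (addresses are illustrative). The client connects
# lazily: the first sendmail() fails, triggers _connect(), and is retried.
#
#     client = SMTPClient()
#     client.sendmail(
#         addrs=[u'dev@example.com'],
#         fromaddr=u'noreply@example.com',
#         reply_to=u'noreply@example.com',
#         subject=u'Notification',
#         message_id=h.gen_message_id(),
#         in_reply_to=None,
#         message=make_multipart_message(encode_email_part(u'Hello', 'plain')))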
|
|
#!/usr/bin/env python
# Data manager for reference data for the QIIME Galaxy tools
import argparse
import ftplib
import json
import os
import tarfile
import zipfile
import requests
protocol = {
"unite": "http",
"greengenes": "ftp",
"silva": "http",
"img": "ftp"
}
baseUrl = {
"unite": "http://unite.ut.ee/sh_files/sh_qiime_release_",
"greengenes": "greengenes.microbio.me",
"silva": "http://www.arb-silva.de/fileadmin/silva_databases/qiime/Silva_",
"img": "ftp.microbio.me"
}
ftp_dir = {
"greengenes": "/greengenes_release/gg_",
"img": ""
}
ftp_file_prefix = {
"greengenes": "gg_",
"img": ""
}
ftp_file_suffix = {
"greengenes": "_otus",
"img": ""
}
extension = {
"unite": "zip",
"greengenes": "tar.gz",
"silva": {
"104_release": "tgz",
"108_release": "tgz",
"108_release_curated": "tgz",
"111_release": "tgz",
"119_consensus_majority_taxonomy": "zip",
"119_release": "zip",
"119_release_aligned_rep_files": "tar.gz",
"123_release": "zip",
"128_release": "tgz"},
"img": "tgz"
}
filetypes = ["rep_set", "rep_set_aligned", "taxonomy", "trees"]
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
    NB the directory pointed to by 'extra_files_path'
    doesn't exist initially; it is the job of the script
    to create it if necessary.
"""
params = json.loads(open(jsonfile).read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
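# A sketch of the JSON this expects (keys taken from the code above; the
# actual file is written by Galaxy, values here are illustrative):
#
# {
#     "param_dict": {"database": "greengenes", "version": "13_8"},
#     "output_data": [{"extra_files_path": "/tmp/dataset_1_files"}]
# }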
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(d,'my_data',dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(d,'my_data',dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def get_ftp_file(ftp, filename):
"""
"""
try:
ftp.retrbinary("RETR " + filename, open(filename, 'wb').write)
except Exception:
print("Error")
def download_archive(db, version, ext):
"""
"""
filepath = "%s_%s.%s" % (db, version, ext)
if protocol[db] == "http":
url = "%s%s.%s" % (baseUrl[db], version, ext)
r = requests.get(url, stream=True)
r.raise_for_status()
with open(filepath, "wb") as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
elif protocol[db] == "ftp":
ftp = ftplib.FTP(baseUrl[db])
ftp.login("anonymous", "ftplib-example-1")
if db == "greengenes" and version == "13_8":
ftp.cwd("%s%s" % (ftp_dir[db], "13_5"))
else:
ftp.cwd("%s%s" % (ftp_dir[db], version))
filepath = "%s%s%s.%s" % (
ftp_file_prefix[db],
version,
ftp_file_suffix[db],
ext)
get_ftp_file(ftp, filepath)
ftp.quit()
return filepath
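# Example (a sketch): download_archive("greengenes", "13_8", "tar.gz") logs in
# anonymously to greengenes.microbio.me and retrieves gg_13_8_otus.tar.gz from
# the 13_5 release directory (see the special case above); HTTP-backed
# databases are streamed with requests instead.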
def find_archive_content_path(archive_content_path):
"""
"""
content = os.listdir(archive_content_path)
archive_content = []
for x in content:
if not x.startswith(".") and not x.startswith("_"):
archive_content.append(x)
if len(archive_content) == 1:
archive_content_path = os.path.join(
archive_content_path,
archive_content[0])
return archive_content_path
def extract_archive(filepath, ext, db):
"""
"""
archive_content_path = "tmp"
if ext == "tar.gz" or ext == "tgz":
tar = tarfile.open(filepath)
tar.extractall(path=archive_content_path)
tar.close()
archive_content_path = find_archive_content_path(archive_content_path)
elif ext == "zip":
zip_ref = zipfile.ZipFile(filepath, 'r')
zip_ref.extractall(archive_content_path)
zip_ref.close()
archive_content_path = find_archive_content_path(archive_content_path)
return archive_content_path
def move_unite_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir):
"""
"""
archive_content = os.listdir(archive_content_path)
for content in archive_content:
content_filepath = os.path.join(archive_content_path, content)
content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
content_filename_prefix = "%s_%s" % (filename_prefix, content)
if content.find("refs") != -1:
move_file(
content_filepath,
content_filename_prefix,
content_name_prefix,
data_tables,
os.path.join(target_dir, "rep_set"),
"rep_set")
elif content.find("taxonomy") != -1:
move_file(
content_filepath,
content_filename_prefix,
content_name_prefix,
data_tables,
os.path.join(target_dir, "taxonomy"),
"taxonomy")
def move_file(input_filepath, filename, name, data_tables, target_dir, filetype):
"""
"""
output_filepath = os.path.join(target_dir, filename)
os.rename(input_filepath, output_filepath)
add_data_table_entry(
data_tables,
"qiime_%s" % (filetype),
dict(
dbkey=filename,
value=os.path.splitext(filename)[0],
name=name,
path=output_filepath))
def move_dir_content(input_path, filename_prefix, name_prefix, data_tables, target_dir, filetype):
"""
"""
for content in os.listdir(input_path):
if content.startswith("."):
continue
content_path = os.path.join(input_path, content)
content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
content_filename_prefix = "%s_%s" % (filename_prefix, content)
if os.path.isdir(content_path):
move_dir_content(
content_path,
content_filename_prefix,
content_name_prefix,
data_tables,
target_dir,
filetype)
else:
move_file(
content_path,
content_filename_prefix,
content_name_prefix,
data_tables,
target_dir,
filetype)
def move_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir, db, version):
"""
"""
for filetype in filetypes:
if filetype == "rep_set_aligned":
if db == "greengenes" and version == "12_10":
continue
filetype_target_dir = os.path.join(
target_dir,
filetype)
filetype_path = os.path.join(
archive_content_path,
filetype)
move_dir_content(
filetype_path,
filename_prefix,
name_prefix,
data_tables,
filetype_target_dir,
filetype)
def download_db(data_tables, db, version, target_dir):
"""Download QIIME database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
db: name of the database
version: version of the database
table_name: name of the table
target_dir: directory to put copy or link to the data file
"""
ext = extension[db]
if db == "silva":
ext = ext[version]
print("Download archive")
filepath = download_archive(db, version, ext)
print("Extract archive %s" % filepath)
archive_content_path = extract_archive(filepath, ext, db)
print("Moving file from %s" % archive_content_path)
filename_prefix = "%s_%s" % (db, version)
name_prefix = "%s (%s)" % (db, version)
if db == "greengenes" or db == "silva":
move_files(
archive_content_path,
filename_prefix,
name_prefix,
data_tables,
target_dir,
db,
version)
elif db == "unite":
move_unite_files(
archive_content_path,
filename_prefix,
name_prefix,
data_tables,
target_dir)
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = argparse.ArgumentParser(
description='Download QIIME reference database')
parser.add_argument('--database', help="Database name")
parser.add_argument('--version', help="Database version")
parser.add_argument('--jsonfile', help="Output JSON file")
args = parser.parse_args()
jsonfile = args.jsonfile
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
os.mkdir(os.path.join(target_dir, "rep_set"))
os.mkdir(os.path.join(target_dir, "rep_set_aligned"))
os.mkdir(os.path.join(target_dir, "taxonomy"))
os.mkdir(os.path.join(target_dir, "trees"))
# Set up data tables dictionary
data_tables = create_data_tables_dict()
add_data_table(data_tables, "qiime_rep_set")
add_data_table(data_tables, "qiime_rep_set_aligned")
add_data_table(data_tables, "qiime_taxonomy")
add_data_table(data_tables, "qiime_trees")
# Fetch data from specified data sources
download_db(
data_tables,
args.database,
args.version,
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
with open(jsonfile, 'w') as out:
json.dump(data_tables, out)
print("Done.")
|
|
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import timezone
from notifications.models import Notification
from storages.backends.s3boto3 import S3Boto3Storage
from eventkit_cloud.core.helpers import sendnotification, NotificationVerb, NotificationLevel
from eventkit_cloud.core.models import (
UIDMixin,
TimeStampedModelMixin,
TimeTrackingModelMixin,
LowerCaseCharField,
DownloadableMixin,
)
from eventkit_cloud.jobs.helpers import get_valid_regional_justification
from eventkit_cloud.jobs.models import Job, DataProvider, JobPermissionLevel, JobPermission, RegionalPolicy
from eventkit_cloud.tasks import get_cache_value, set_cache_value, DEFAULT_CACHE_EXPIRATION
from eventkit_cloud.tasks.enumerations import TaskState
logger = logging.getLogger(__name__)
def get_all_users_by_permissions(permissions):
return User.objects.filter(
models.Q(groups__name=permissions["groups"]) | models.Q(username__in=permissions["members"])
).distinct()
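# The expected shape of 'permissions', inferred from the query above:
#
#     permissions = {"groups": "group-name", "members": ["user1", "user2"]}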
def notification_delete(instance):
for notification in Notification.objects.filter(actor_object_id=instance.id):
ct = ContentType.objects.filter(pk=notification.actor_content_type_id).get()
if ct == ContentType.objects.get_for_model(type(instance)):
notification.delete()
def notification_soft_delete(instance):
for notification in Notification.objects.filter(actor_object_id=instance.id):
ct = ContentType.objects.filter(pk=notification.actor_content_type_id).get()
if ct == ContentType.objects.get_for_model(type(instance)):
notification.public = False
notification.save()
class NotificationModelMixin(models.Model):
def delete_notifications(self, *args, **kwargs):
notification_delete(self)
def soft_delete_notifications(self, *args, **kwargs):
permissions = kwargs.get("permissions")
if permissions:
users = get_all_users_by_permissions(permissions)
logger.error("users: {0}".format(users))
for user in users:
logger.error("Sending notification to {0}".format(user))
sendnotification(
self,
user,
NotificationVerb.RUN_DELETED.value,
None,
None,
NotificationLevel.WARNING.value,
getattr(self, "status", "DELETED"),
)
class Meta:
abstract = True
class FileProducingTaskResult(UIDMixin, DownloadableMixin, NotificationModelMixin):
"""
    A FileProducingTaskResult holds the information about the file produced by a task, i.e. the result that was the reason for executing the task.
"""
deleted = models.BooleanField(default=False)
def soft_delete(self, *args, **kwargs):
from eventkit_cloud.tasks.signals import exporttaskresult_delete_exports
exporttaskresult_delete_exports(self.__class__, self)
self.deleted = True
self.export_task.display = False
self.save()
def user_can_download(self, user: User):
"""
Checks to see if the user has all of the required permissions to download the file. To not make these
requests slower ideally the downloadable will have already
select_related("export_task__export_provider_task__provider", "export_task__export_provider_task__run")
:param user: The user requesting the file.
:param downloadable: The downloadable file.
:return:
"""
jobs = JobPermission.userjobs(user, JobPermissionLevel.READ.value)
job = jobs.filter(runs__data_provider_task_records__tasks__result=self).first()
providers = []
if not job:
return False
# Check the associated RunZipFile for attribute classes.
attribute_classes = []
for run_zip_file in self.runzipfile_set.all():
for data_provider_task_record in run_zip_file.data_provider_task_records.all():
providers.append(data_provider_task_record.provider)
if data_provider_task_record.provider.attribute_class:
attribute_classes.append(data_provider_task_record.provider.attribute_class)
for attribute_class in attribute_classes:
if attribute_class and not attribute_class.users.filter(id=user.id):
return False
# Get the providers associated with this download if it's not a zipfile.
if self.export_task.export_provider_task.provider:
providers.append(self.export_task.export_provider_task.provider)
# Check to make sure the user has agreed to the regional policy if one exists.
for policy in RegionalPolicy.objects.filter(
region__the_geom__intersects=job.the_geom, providers__in=providers
).prefetch_related("justifications"):
if not get_valid_regional_justification(policy, user):
return False
return True
class Meta:
managed = True
db_table = "export_task_results"
def __str__(self):
return "FileProducingTaskResult ({}), {}".format(self.uid, self.filename)
def clone(self):
self.id = None
self.uid = None
self.save()
return self
class ExportRun(UIDMixin, TimeStampedModelMixin, TimeTrackingModelMixin, NotificationModelMixin):
"""
ExportRun is the main structure for storing export information.
A Job provides information for the ExportRun.
Many ExportRuns can map to a Job.
Many DataProviderTasks can map to an ExportRun.
    Many ExportTasks can map to a DataProviderTaskRecord.
"""
job = models.ForeignKey(Job, related_name="runs", on_delete=models.CASCADE)
parent_run = models.ForeignKey(
"ExportRun", related_name="child_runs", null=True, default=None, on_delete=models.SET_NULL
)
user = models.ForeignKey(User, related_name="runs", default=0, on_delete=models.CASCADE)
worker = models.CharField(max_length=50, editable=False, default="", null=True)
status = models.CharField(blank=True, max_length=20, db_index=True, default="")
expiration = models.DateTimeField(default=timezone.now, editable=True)
notified = models.DateTimeField(default=None, blank=True, null=True)
deleted = models.BooleanField(default=False, db_index=True)
is_cloning = models.BooleanField(default=False)
delete_user = models.ForeignKey(User, null=True, blank=True, editable=False, on_delete=models.CASCADE)
class Meta:
managed = True
db_table = "export_runs"
verbose_name = "ExportRun (DataPack)"
verbose_name_plural = "ExportRuns (DataPacks)"
def __str__(self):
return "{0}".format(str(self.uid))
def soft_delete(self, user=None, *args, **kwargs):
from eventkit_cloud.tasks.export_tasks import cancel_run
from eventkit_cloud.tasks.signals import exportrun_delete_exports
exportrun_delete_exports(self.__class__, self)
username = None
if user:
self.delete_user = user
username = user.username
self.deleted = True
logger.info("Deleting run {0} by user {1}".format(str(self.uid), user))
cancel_run(export_run_uid=self.uid, canceling_username=username, delete=True)
self.save()
self.soft_delete_notifications(*args, **kwargs)
def clone(self, download_data=True):
data_provider_task_records = list(self.data_provider_task_records.exclude(provider__slug=""))
parent_id = self.id
self.pk = None
self.id = None
self.uid = None
self.expiration = timezone.now() + timezone.timedelta(days=14)
self.created_at = timezone.now()
self.started_at = None
self.finished_at = None
self.save()
for data_provider_task_record in data_provider_task_records:
if data_provider_task_record.provider:
dptr = data_provider_task_record.clone(self)
if not self.data_provider_task_records.filter(id=dptr.id):
self.data_provider_task_records.add(dptr)
self.parent_run = ExportRun.objects.get(id=parent_id)
self.is_cloning = True
self.deleted = False
self.save()
if download_data:
self.download_data()
self.is_cloning = False
self.save()
return self
def download_data(self):
"""
Downloads the data for a run into the staging directory.
This is helpful when wanting to clone a run but delay the downloading of the data
onto a fresh node if not using shared storage.
"""
# This logic was considered in each related model to the run, but was mostly just passing flags through
# to both keep existing models and/or download data for the related models. This became messy and unnecessary,
        # since cloning and managing datapacks is mostly done at the run level. If managing data moved to the data
        # provider or task level, it wouldn't make sense to have a
        # complicated helper function like this for each model.
from eventkit_cloud.tasks.helpers import download_run_directory, make_file_downloadable
previous_run = self.parent_run
download_run_directory(previous_run, self)
data_provider_task_records = (
self.data_provider_task_records.exclude(slug="run")
.prefetch_related("tasks__result")
.select_related("preview")
)
for data_provider_task_record in data_provider_task_records:
file_models = [data_provider_task_record.preview]
export_task_record: ExportTaskRecord
for export_task_record in data_provider_task_record.tasks.all():
file_models.append(export_task_record.result)
for file_model in file_models:
if not file_model:
continue
# strip the old run uid off the filename and add a new one.
filename = Path(str(self.uid)).joinpath(Path(file_model.filename).relative_to(str(previous_run.uid)))
file_model.filename = filename
filename, download_url = make_file_downloadable(file_model.get_file_path(staging=True))
file_model.download_url = download_url
file_model.save()
self.is_cloning = False
self.save()
class ExportRunFile(UIDMixin, TimeStampedModelMixin):
"""
The ExportRunFile stores additional files to be added to each ExportRun zip archive.
"""
storage = None
if settings.USE_S3:
storage = S3Boto3Storage()
else:
storage = FileSystemStorage(location=settings.EXPORT_RUN_FILES, base_url=settings.EXPORT_RUN_FILES_DOWNLOAD)
file = models.FileField(verbose_name="File", storage=storage)
directory = models.CharField(
max_length=100, null=True, blank=True, help_text="An optional directory name to store the file in."
)
provider = models.ForeignKey(
DataProvider,
on_delete=models.CASCADE,
related_name="file_provider",
null=True,
blank=True,
help_text="An optional data provider to associate the file with.",
)
def save(self, *args, **kwargs):
if self.pk:
export_run_file = ExportRunFile.objects.get(id=self.id)
if export_run_file.file != self.file:
export_run_file.file.delete(save=False)
super(ExportRunFile, self).save(*args, **kwargs)
class DataProviderTaskRecord(UIDMixin, TimeStampedModelMixin, TimeTrackingModelMixin):
"""
The DataProviderTaskRecord stores the task information for a specific provider.
"""
from eventkit_cloud.jobs.models import MapImageSnapshot
name = models.CharField(max_length=100, blank=True)
slug = LowerCaseCharField(max_length=40, default="")
provider = models.ForeignKey(
DataProvider, on_delete=models.CASCADE, related_name="task_record_providers", null=True, blank=True
)
run = models.ForeignKey(ExportRun, related_name="data_provider_task_records", on_delete=models.CASCADE)
status = models.CharField(blank=True, max_length=20, db_index=True)
display = models.BooleanField(default=False)
estimated_size = models.FloatField(null=True, blank=True)
estimated_duration = models.FloatField(null=True, blank=True)
preview = models.ForeignKey(
MapImageSnapshot, blank=True, null=True, on_delete=models.SET_NULL, help_text="A preview for a provider task."
)
class Meta:
ordering = ["name"]
managed = True
db_table = "data_provider_task_records"
unique_together = ["provider", "run"]
def __str__(self):
return "DataProviderTaskRecord uid: {0}".format(str(self.uid))
def clone(self, run: ExportRun):
"""
The ExportRun needs to be a **new** run, otherwise integrity errors will happen.
"""
export_task_records = list(self.tasks.all())
preview = self.preview
self.id = None
self.uid = None
self.run = run
self.save()
for export_task_record in export_task_records:
etr = export_task_record.clone(self)
if not self.tasks.filter(id=etr.id).exists():
self.tasks.add(etr)
if preview:
self.preview = preview.clone()
return self
class UserDownload(UIDMixin):
"""
Model that stores each DataPack download event.
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="downloads")
downloaded_at = models.DateTimeField(verbose_name="Time of Download", default=timezone.now, editable=False)
downloadable = models.ForeignKey(FileProducingTaskResult, on_delete=models.CASCADE, related_name="downloads")
class Meta:
ordering = ["-downloaded_at"]
@property
def job(self):
if self.downloadable.export_task:
return self.downloadable.export_task.export_provider_task.run.job
if self.downloadable.run:
return self.downloadable.run.job
@property
def provider(self):
if self.downloadable.export_task:
return self.downloadable.export_task.export_provider_task.provider
def clone(self):
self.id = None
self.uid = None
self.save()
return self
class ExportTaskRecord(UIDMixin, TimeStampedModelMixin, TimeTrackingModelMixin):
"""
An ExportTaskRecord holds the information about the process doing the actual work for a task.
"""
celery_uid = models.UUIDField(null=True) # celery task uid
name = models.CharField(max_length=100)
export_provider_task = models.ForeignKey(DataProviderTaskRecord, related_name="tasks", on_delete=models.CASCADE)
status = models.CharField(blank=True, max_length=20, db_index=True)
pid = models.IntegerField(blank=True, default=-1)
worker = models.CharField(max_length=100, blank=True, editable=False, null=True)
cancel_user = models.ForeignKey(User, null=True, blank=True, editable=False, on_delete=models.CASCADE)
display = models.BooleanField(default=False)
result = models.OneToOneField(
"FileProducingTaskResult", on_delete=models.CASCADE, null=True, blank=True, related_name="export_task"
)
hide_download = models.BooleanField(default=False)
class Meta:
ordering = ["created_at"]
managed = True
db_table = "export_task_records"
unique_together = ["name", "export_provider_task"]
def __str__(self):
return "ExportTaskRecord uid: {0}".format(str(self.uid))
@property
def progress(self):
if TaskState[self.status] in TaskState.get_finished_states():
return 100
return get_cache_value(obj=self, attribute="progress", default=0)
@progress.setter
def progress(self, value, expiration=DEFAULT_CACHE_EXPIRATION):
return set_cache_value(obj=self, attribute="progress", value=value, expiration=expiration)
@property
def estimated_finish(self):
if TaskState[self.status] in TaskState.get_finished_states():
return
return get_cache_value(obj=self, attribute="estimated_finish", default=0)
@estimated_finish.setter
def estimated_finish(self, value, expiration=DEFAULT_CACHE_EXPIRATION):
return set_cache_value(obj=self, attribute="estimated_finish", value=value, expiration=expiration)
def clone(self, data_provider_task_record: DataProviderTaskRecord):
# Get the exceptions from the old ExportTaskRecord
exceptions = list(self.exceptions.all())
# Create a new FPTR now because we can't clone the ETR with the old FPTR since it has a unique constraint.
if self.result:
file_producing_task_result = self.result.clone()
file_producing_task_result.id = None
file_producing_task_result.uid = None
file_producing_task_result.save()
self.result = file_producing_task_result
# Create the new ExportTaskRecord
self.id = None
self.uid = None
self.export_provider_task = data_provider_task_record
self.save()
# Add the exceptions to the new ExportTaskRecord
for exception in exceptions:
e = exception.clone()
if not self.exceptions.filter(id=e.id).exists():
self.exceptions.add(e)
self.save()
return self
class ExportTaskException(TimeStampedModelMixin):
"""
Model to store ExportTaskRecord exceptions for auditing.
"""
id = models.AutoField(primary_key=True, editable=False)
task = models.ForeignKey(ExportTaskRecord, related_name="exceptions", on_delete=models.CASCADE)
exception = models.TextField(editable=False)
class Meta:
managed = True
db_table = "export_task_exceptions"
def clone(self):
self.id = None
self.uid = None
self.save()
return self
def prefetch_export_runs(queryset_list_or_model):
prefetch_args = [
"job__data_provider_tasks__provider",
"job__data_provider_tasks__formats",
"data_provider_task_records__tasks__result",
"data_provider_task_records__tasks__exceptions",
]
if isinstance(queryset_list_or_model, models.query.QuerySet):
return queryset_list_or_model.select_related("user").prefetch_related(*prefetch_args)
elif isinstance(queryset_list_or_model, list):
models.prefetch_related_objects(queryset_list_or_model, *prefetch_args)
elif isinstance(queryset_list_or_model, ExportRun):
models.prefetch_related_objects([queryset_list_or_model], *prefetch_args)
return queryset_list_or_model
class RunZipFile(UIDMixin, TimeStampedModelMixin, TimeTrackingModelMixin):
"""
Model to store zip files associated with ExportRun objects.
"""
run = models.ForeignKey(ExportRun, on_delete=models.CASCADE, related_name="zip_files", null=True, blank=True)
data_provider_task_records = models.ManyToManyField(DataProviderTaskRecord)
downloadable_file = models.ForeignKey(FileProducingTaskResult, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"RunZipFile uid: {self.uid}"
@property
def message(self):
return get_cache_value(obj=self, attribute="message", default="")
@message.setter
def message(self, value, expiration=DEFAULT_CACHE_EXPIRATION):
return set_cache_value(obj=self, attribute="message", value=value, expiration=expiration)
@property
def status(self):
return get_cache_value(obj=self, attribute="status", default="")
@status.setter
def status(self, value, expiration=DEFAULT_CACHE_EXPIRATION):
return set_cache_value(obj=self, attribute="status", value=value, expiration=expiration)
def get_run_zip_file_slug_sets(new_run, old_run_zip_files):
"""
:param old_run_zip_files: A list of run zip files.
:return: A set of provider slugs for each zip file.
"""
data_provider_task_records = new_run.data_provider_task_records.exclude(provider__isnull=True)
all_run_zip_file_slugs = [
data_provider_task_record.provider.slug for data_provider_task_record in data_provider_task_records
]
run_zip_file_slug_sets = []
for old_run_zip_file in old_run_zip_files:
run_zip_file_slug_set = []
for data_provider_task_record in old_run_zip_file.data_provider_task_records.all():
run_zip_file_slug_set.append(data_provider_task_record.provider.slug)
# Don't rerun the overall project zip file.
if all_run_zip_file_slugs != run_zip_file_slug_set:
run_zip_file_slug_sets.append(run_zip_file_slug_set)
return run_zip_file_slug_sets
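# A usage sketch (assuming a cloned run and its parent's zip files): each slug
# set names a provider subset whose zip file should be recreated.
#
#     slug_sets = get_run_zip_file_slug_sets(new_run, old_run.zip_files.all())
#     for slug_set in slug_sets:
#         ...  # rebuild the zip covering exactly these provider slugs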
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Tue May 3 20:25:51 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import sip
import sys
import time
class top_block(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
Qt.QWidget.__init__(self)
self.setWindowTitle("Top Block")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 200e6
##################################################
# Blocks
##################################################
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join(("", "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(2),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_center_freq(2219e6, 0)
self.uhd_usrp_source_0.set_gain(0, 0)
self.uhd_usrp_source_0.set_antenna("RX2", 0)
self.uhd_usrp_source_0.set_center_freq(0, 1)
self.uhd_usrp_source_0.set_gain(0, 1)
self.uhd_usrp_source_0.set_antenna("RX2", 1)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_1_win)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_0_win)
##################################################
# Connections
##################################################
self.connect((self.uhd_usrp_source_0, 1), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_freq_sink_x_1, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.samp_rate)
def main(top_block_cls=top_block, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
|
"""
$url play.afreecatv.com
$type live
"""
import logging
import re
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.stream.hls import HLSStreamReader, HLSStreamWriter
log = logging.getLogger(__name__)
class AfreecaHLSStreamWriter(HLSStreamWriter):
def should_filter_sequence(self, sequence):
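        # drop AfreecaTV "preloading" placeholder segments in addition to the default filtering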
return "preloading" in sequence.segment.uri or super().should_filter_sequence(sequence)
class AfreecaHLSStreamReader(HLSStreamReader):
__writer__ = AfreecaHLSStreamWriter
class AfreecaHLSStream(HLSStream):
__reader__ = AfreecaHLSStreamReader
@pluginmatcher(re.compile(
r"https?://play\.afreecatv\.com/(?P<username>\w+)(?:/(?P<bno>:\d+))?"
))
class AfreecaTV(Plugin):
_re_bno = re.compile(r"var nBroadNo = (?P<bno>\d+);")
CHANNEL_API_URL = "http://live.afreecatv.com/afreeca/player_live_api.php"
CHANNEL_RESULT_OK = 1
    QUALITIES = ["original", "hd", "sd"]
QUALITY_WEIGHTS = {
"original": 1080,
"hd": 720,
"sd": 480,
}
_schema_channel = validate.Schema(
{
"CHANNEL": {
"RESULT": validate.transform(int),
validate.optional("BPWD"): str,
validate.optional("BNO"): str,
validate.optional("RMD"): str,
validate.optional("AID"): str,
validate.optional("CDN"): str,
}
},
validate.get("CHANNEL")
)
_schema_stream = validate.Schema(
{
validate.optional("view_url"): validate.url(
scheme=validate.any("rtmp", "http")
),
"stream_status": str,
}
)
arguments = PluginArguments(
PluginArgument(
"username",
sensitive=True,
requires=["password"],
metavar="USERNAME",
help="The username used to register with afreecatv.com."
),
PluginArgument(
"password",
sensitive=True,
metavar="PASSWORD",
help="A afreecatv.com account password to use with --afreeca-username."
),
PluginArgument(
"purge-credentials",
action="store_true",
help="""
Purge cached AfreecaTV credentials to initiate a new session
and reauthenticate.
"""),
)
def __init__(self, url):
super().__init__(url)
self._authed = (
self.session.http.cookies.get("PdboxBbs")
and self.session.http.cookies.get("PdboxSaveTicket")
and self.session.http.cookies.get("PdboxTicket")
and self.session.http.cookies.get("PdboxUser")
and self.session.http.cookies.get("RDB")
)
@classmethod
def stream_weight(cls, key):
weight = cls.QUALITY_WEIGHTS.get(key)
if weight:
return weight, "afreeca"
return Plugin.stream_weight(key)
def _get_channel_info(self, broadcast, username):
data = {
"bid": username,
"bno": broadcast,
"from_api": "0",
"mode": "landing",
"player_type": "html5",
"pwd": "",
"stream_type": "common",
"type": "live",
}
res = self.session.http.post(self.CHANNEL_API_URL, data=data)
return self.session.http.json(res, schema=self._schema_channel)
def _get_hls_key(self, broadcast, username, quality):
data = {
"bid": username,
"bno": broadcast,
"from_api": "0",
"mode": "landing",
"player_type": "html5",
"pwd": "",
"quality": quality,
"stream_type": "common",
"type": "aid",
}
res = self.session.http.post(self.CHANNEL_API_URL, data=data)
return self.session.http.json(res, schema=self._schema_channel)
def _get_stream_info(self, broadcast, quality, rmd):
params = {
"return_type": "gs_cdn_pc_web",
"broad_key": f"{broadcast}-common-{quality}-hls",
}
res = self.session.http.get(f"{rmd}/broad_stream_assign.html", params=params)
return self.session.http.json(res, schema=self._schema_stream)
def _get_hls_stream(self, broadcast, username, quality, rmd):
keyjson = self._get_hls_key(broadcast, username, quality)
if keyjson["RESULT"] != self.CHANNEL_RESULT_OK:
return
key = keyjson["AID"]
info = self._get_stream_info(broadcast, quality, rmd)
if "view_url" in info:
return AfreecaHLSStream(self.session, info["view_url"], params={"aid": key})
def _login(self, username, password):
data = {
"szWork": "login",
"szType": "json",
"szUid": username,
"szPassword": password,
"isSaveId": "true",
"isSavePw": "false",
"isSaveJoin": "false",
"isLoginRetain": "Y",
}
res = self.session.http.post("https://login.afreecatv.com/app/LoginAction.php", data=data)
data = self.session.http.json(res)
log.trace(f"{data!r}")
if data["RESULT"] == self.CHANNEL_RESULT_OK:
self.save_cookies()
return True
else:
return False
def _get_streams(self):
login_username = self.get_option("username")
login_password = self.get_option("password")
self.session.http.headers.update({"Referer": self.url, "Origin": "http://play.afreecatv.com"})
if self.options.get("purge_credentials"):
self.clear_cookies()
self._authed = False
log.info("All credentials were successfully removed")
if self._authed:
log.debug("Attempting to authenticate using cached cookies")
elif login_username and login_password:
log.debug("Attempting to login using username and password")
if self._login(login_username, login_password):
log.info("Login was successful")
else:
log.error("Failed to login")
m = self.match.groupdict()
username = m["username"]
bno = m["bno"]
if bno is None:
res = self.session.http.get(self.url)
m = self._re_bno.search(res.text)
if not m:
log.error("Could not find broadcast number.")
return
bno = m.group("bno")
channel = self._get_channel_info(bno, username)
log.trace(f"{channel!r}")
if channel.get("BPWD") == "Y":
log.error("Stream is Password-Protected")
return
elif channel.get("RESULT") == -6:
log.error("Login required")
return
elif channel.get("RESULT") != self.CHANNEL_RESULT_OK:
return
(broadcast, rmd) = (channel["BNO"], channel["RMD"])
if not (broadcast and rmd):
return
        for qkey in self.QUALITIES:
hls_stream = self._get_hls_stream(broadcast, username, qkey, rmd)
if hls_stream:
yield qkey, hls_stream
__plugin__ = AfreecaTV
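# Usage via the streamlink CLI (channel name illustrative):
#   streamlink https://play.afreecatv.com/someuser best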
|
|
from __future__ import division, print_function
import numpy as np
from .transform import Transform
from .utils import prepare as prepare_dataset
__all__ = ['Fourier', 'filter']
class Fourier(Transform):
def __init__(self, kx, ky, S, cellsize=25, area=None):
self.kx = kx
self.ky = ky
self.S = S
if isinstance(cellsize, int):
cellsize = (cellsize, cellsize)
self.cellsize = cellsize
if area is None:
# area = cellsize[0] * S.shape[0] * cellsize[1] * S.shape[1]
area = S.shape[0] * S.shape[1]
self.area = area
@property
def shape(self):
"""return the shape of the fourier transform spectrum"""
return self.S.shape
@property
def amplitude(self):
"""return the amplitude as 2*abs(S/datasize)"""
return 2*np.absolute(self.S/self.area)
@property
def steepness(self):
"""return the steepness as H*K or H/L with H = 2*amplitude"""
        return 2 * self.amplitude * self.K
@property
def K(self):
"""return a matrix with the wave numbers"""
Kx, Ky = np.meshgrid(self.kx, self.ky)
return (Kx**2+Ky**2)**.5
@classmethod
def transform(cls, x, y, Z, taper=False, remove_avg=False):
"""
returns a shifted fourier transform (fft2) of the data
:param x: numpy array (1D) with x coordinates
:param y: numpy array (1D) with y coordinates
:param Z: numpy array (2D) with data values (no numpy MaskedArray)
:return: Fourier object
        the returned value is a Fourier object with the following important attributes:
.kx wave number in horizontal direction
.ky wave number in vertical direction
.S spectrum
.K absolute wave number (kx**2+ky**2)**.5
.amplitude amplitude representation
.steepness amplitude*absolute wave number
.shape shape of the spectrum
and the following methods:
.reverse() reverse the fourier to retrieve the data
.plot(axes) plot the fourier transform on the supplied axis
.apply_mask(mask) multiplies the spectrum by the supplied mask to modify the spectrum (eg. filtering)
"""
Z = cls.prepare(Z, taper=taper, avg=remove_avg)
# difference first 2 values along x and y
cellsize = (np.abs(x[1] - x[0]), np.abs(y[1] - y[0]))
if (np.absolute(np.diff(x) - cellsize[0]) > cellsize[0]*.001).any():
raise ValueError('x not equally spaced')
if (np.absolute(np.diff(y) - cellsize[1]) > cellsize[1]*.001).any():
raise ValueError('y not equally spaced')
if isinstance(Z, np.ma.MaskedArray):
raise TypeError('input for fourier transform may not be of type MaskedArray')
# create wave number vectors
        kx = 2*np.pi*np.linspace(-.5, .5, Z.shape[1])/cellsize[0]  # x spacing for the x wave numbers
        ky = 2*np.pi*np.linspace(-.5, .5, Z.shape[0])/cellsize[1]  # y spacing for the y wave numbers
# spectrum
S = np.fft.fftshift(np.fft.fft2(Z))
# return a fourier object
return cls(kx, ky, S, cellsize=cellsize)
def reverse(self, shape=None, nanmask=None):
"""
return a dataset from the fourier spectrum
:param shape: shape of the output (should be equal to the original dataset)
:param nanmask: numpy bool array of nan values where true means to mask
:return: new dataset
"""
# determine shape
if shape is None:
            shape = self.ky.shape[0], self.kx.shape[0]  # (rows, cols) of the original data
# shift spectrum
S = np.fft.ifftshift(self.S)
# reverse fourier of spectrum
data = np.real(np.fft.ifft2(S, shape))
# apply mask for missing values
if nanmask is not None:
return np.ma.array(data, mask=nanmask)
else:
return data
def apply_mask(self, m, shift=False):
"""
:param m:
:param shift:
:return:
"""
if shift:
m = np.fft.fftshift(m)
S = self.S*m
return type(self)(self.kx, self.ky, S,
cellsize=self.cellsize,
area=self.area)
def filter(self, kmin=0, kmax=np.inf, theta=None, theta_offset=180, dirmode='deg', spatial_frequencies=True):
"""
Filter the spectrum based on wave number or angle
        :param kmin: minimum wave number as spatial or angular (see spatial_frequencies) with default 0
:param kmax: maximum wave number as spatial or angular (see spatial_frequencies) with default np.inf
:param theta: mean angle of features to keep
:param theta_offset: offset from theta to either side
:param dirmode: specifies angle definition (deg|rad)
:param spatial_frequencies: specifies if wave numbers are spatial (1/L) or angular (2pi/L)
:return: new Fourier object with masked frequencies
"""
if spatial_frequencies:
kmin = 2 * np.pi * kmin
kmax = 2 * np.pi * kmax
mask = (self.K > kmin) & (self.K < kmax)
if theta is not None:
if dirmode == 'deg':
theta = theta * np.pi / 180
theta_offset = theta_offset * np.pi / 180
elif dirmode == 'rad':
pass
else:
raise ValueError('invalid direction mode')
x = np.linspace(-1, 1, self.kx.size)
y = np.linspace(-1, 1, self.ky.size)
X, Y = np.meshgrid(x, y)
angle_diff = np.absolute((theta - np.arctan2(Y, X) + .5 * np.pi) % np.pi - .5 * np.pi)
mask = np.logical_and(mask, angle_diff <= theta_offset)
return self.apply_mask(mask)
def plot(self, ax, attr='steepness', labelsize=14, zero_lines=True, spatial_frequencies=True, vmin=0, cmap='inferno', **kw):
if spatial_frequencies:
            x = self.kx / (2 * np.pi)
            y = self.ky / (2 * np.pi)
xlabel = r'$\xi_x$'
ylabel = r'$\xi_y$'
else:
x = self.kx
y = self.ky
xlabel = r'$k_x$'
ylabel = r'$k_y$'
c = ax.pcolormesh(x, y, getattr(self, attr), cmap=cmap, vmin=vmin, **kw)
ax.set_xlabel(xlabel, size=labelsize)
ax.set_ylabel(ylabel, size=labelsize)
ax.set_aspect('equal')
if zero_lines:
ax.axhline(0, color=(.9, .9, .9), lw=.5)
ax.axvline(0, color=(.9, .9, .9), lw=.5)
return c
@classmethod
def prepare(cls, data, taper=True, avg=True):
return prepare_dataset(data, unmask=True, taper=taper, avg=avg)
def filter(x, y, data, **kwargs):
    """Shortcut: fourier transform 'data', filter the spectrum (see Fourier.filter)
    and reverse it; 'data' should be a numpy MaskedArray so its mask is restored."""
    return Fourier.transform(x, y, data)\
        .filter(**kwargs)\
        .reverse(nanmask=data.mask)
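# A minimal usage sketch (synthetic data; values are illustrative):
#
#     import numpy as np
#     x = np.arange(0, 1000, 25.0)
#     y = np.arange(0, 500, 25.0)
#     X, Y = np.meshgrid(x, y)
#     Z = np.ma.masked_invalid(np.sin(2 * np.pi * X / 200.0))
#     # keep only features longer than 100 m (spatial frequency < 1/100)
#     filtered = filter(x, y, Z, kmax=1.0 / 100.0)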
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from testtools import matchers
from keystone.common import serializer
from keystone import tests
from keystone.tests import matchers as ksmatchers
class XmlSerializerTestCase(tests.TestCase):
def assertSerializeDeserialize(self, d, xml, xmlns=None):
self.assertThat(
serializer.to_xml(copy.deepcopy(d), xmlns),
ksmatchers.XMLEquals(xml))
self.assertEqual(serializer.from_xml(xml), d)
# operations should be invertible
self.assertEqual(
serializer.from_xml(serializer.to_xml(copy.deepcopy(d), xmlns)),
d)
self.assertThat(
serializer.to_xml(serializer.from_xml(xml), xmlns),
ksmatchers.XMLEquals(xml))
def test_auth_request(self):
d = {
"auth": {
"passwordCredentials": {
"username": "test_user",
"password": "mypass"
},
"tenantName": "customer-x"
}
}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<auth xmlns="http://docs.openstack.org/identity/api/v2.0"
tenantName="customer-x">
<passwordCredentials
username="test_user"
password="mypass"/>
</auth>
"""
self.assertSerializeDeserialize(d, xml)
def test_role_crud(self):
d = {
"role": {
"id": "123",
"name": "Guest",
"description": "Guest Access"
}
}
# TODO(dolph): examples show this description as an attribute?
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<role xmlns="http://docs.openstack.org/identity/api/v2.0"
id="123"
name="Guest">
<description>Guest Access</description>
</role>
"""
self.assertSerializeDeserialize(d, xml)
def test_service_crud(self):
xmlns = "http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0"
d = {
"OS-KSADM:service": {
"id": "123",
"name": "nova",
"type": "compute",
"description": "OpenStack Compute Service"
}
}
# TODO(dolph): examples show this description as an attribute?
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<service
xmlns="%(xmlns)s"
type="compute"
id="123"
name="nova">
<description>OpenStack Compute Service</description>
</service>
""" % {'xmlns': xmlns}
self.assertSerializeDeserialize(d, xml, xmlns=xmlns)
def test_tenant_crud(self):
d = {
"tenant": {
"id": "1234",
"name": "ACME corp",
"description": "A description...",
"enabled": True
}
}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<tenant
xmlns="http://docs.openstack.org/identity/api/v2.0"
enabled="true"
id="1234"
name="ACME corp">
<description>A description...</description>
</tenant>
"""
self.assertSerializeDeserialize(d, xml)
def test_tenant_crud_no_description(self):
d = {
"tenant": {
"id": "1234",
"name": "ACME corp",
"description": "",
"enabled": True
}
}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<tenant
xmlns="http://docs.openstack.org/identity/api/v2.0"
enabled="true"
id="1234"
name="ACME corp">
<description></description>
</tenant>
"""
self.assertSerializeDeserialize(d, xml)
def test_policy_list(self):
d = {"policies": [{"id": "ab12cd"}]}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<policies xmlns="http://docs.openstack.org/identity/api/v2.0">
<policy id="ab12cd"/>
</policies>
"""
self.assertThat(serializer.to_xml(d), ksmatchers.XMLEquals(xml))
def test_values_list(self):
d = {
"objects": {
"values": [{
"attribute": "value1",
}, {
"attribute": "value2",
}]
}
}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<objects xmlns="http://docs.openstack.org/identity/api/v2.0">
<object attribute="value1"/>
<object attribute="value2"/>
</objects>
"""
self.assertThat(serializer.to_xml(d), ksmatchers.XMLEquals(xml))
def test_collection_list(self):
d = {
"links": {
"next": "http://localhost:5000/v3/objects?page=3",
"previous": None,
"self": "http://localhost:5000/v3/objects"
},
"objects": [{
"attribute": "value1",
"links": {
"self": "http://localhost:5000/v3/objects/abc123def",
"anotherobj": "http://localhost:5000/v3/anotherobjs/123"
}
}, {
"attribute": "value2",
"links": {
"self": "http://localhost:5000/v3/objects/abc456"
}
}]}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<objects xmlns="http://docs.openstack.org/identity/api/v2.0">
<object attribute="value1">
<links>
<link rel="self"
href="http://localhost:5000/v3/objects/abc123def"/>
<link rel="anotherobj"
href="http://localhost:5000/v3/anotherobjs/123"/>
</links>
</object>
<object attribute="value2">
<links>
<link rel="self"
href="http://localhost:5000/v3/objects/abc456"/>
</links>
</object>
<links>
<link rel="self"
href="http://localhost:5000/v3/objects"/>
<link rel="next"
href="http://localhost:5000/v3/objects?page=3"/>
</links>
</objects>
"""
self.assertSerializeDeserialize(d, xml)
def test_collection_member(self):
d = {
"object": {
"attribute": "value",
"links": {
"self": "http://localhost:5000/v3/objects/abc123def",
"anotherobj": "http://localhost:5000/v3/anotherobjs/123"}}}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<object xmlns="http://docs.openstack.org/identity/api/v2.0"
attribute="value">
<links>
<link rel="self"
href="http://localhost:5000/v3/objects/abc123def"/>
<link rel="anotherobj"
href="http://localhost:5000/v3/anotherobjs/123"/>
</links>
</object>
"""
self.assertSerializeDeserialize(d, xml)
def test_v2_links_special_case(self):
# There's special-case code (for backward compatibility) where if the
# data is the v2 version data, the link elements are also added to the
# main element.
d = {
"object": {
"id": "v2.0",
"status": "deprecated",
"updated": "2014-04-17T00:00:00Z",
"links": [{"href": "http://localhost:5000/v2.0/",
"rel": "self"},
{"href": "http://docs.openstack.org/api/openstack-"
"identity-service/2.0/content/",
"type": "text/html", "rel": "describedby"},
{"href": "http://docs.openstack.org/api/openstack-"
"identity-service/2.0/"
"identity-dev-guide-2.0.pdf",
"type": "application/pdf", "rel": "describedby"}]
}}
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<object xmlns="http://docs.openstack.org/identity/api/v2.0"
id="v2.0" status="deprecated" updated="2014-04-17T00:00:00Z">
<links>
<link rel="self" href="http://localhost:5000/v2.0/"/>
<link rel="describedby"
href="http://docs.openstack.org/api/openstack-\
identity-service/2.0/content/" type="text/html"/>
<link rel="describedby"
href="http://docs.openstack.org/api/openstack-\
identity-service/2.0/identity-dev-guide-2.0.pdf" type="application/pdf"/>
</links>
<link rel="self" href="http://localhost:5000/v2.0/"/>
<link rel="describedby"
href="http://docs.openstack.org/api/openstack-\
identity-service/2.0/content/" type="text/html"/>
<link rel="describedby"
href="http://docs.openstack.org/api/openstack-\
identity-service/2.0/identity-dev-guide-2.0.pdf" type="application/pdf"/>
</object>
"""
self.assertThat(serializer.to_xml(d), ksmatchers.XMLEquals(xml))
def test_xml_with_namespaced_attribute_to_dict(self):
expected = {
"user": {
"username": "test_user",
"OS-KSADM:password": "mypass",
},
}
xmlns = 'http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0'
xml = """
<?xml version="1.0" encoding="UTF-8"?>
<user xmlns="http://docs.openstack.org/identity/api/v2.0"
xmlns:OS-KSADM="%(xmlns)s"
username="test_user"
OS-KSADM:password="mypass"/>
""" % dict(xmlns=xmlns)
self.assertThat(serializer.from_xml(xml), matchers.Equals(expected))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ExpressRouteCircuitAuthorizationsOperations(object):
"""ExpressRouteCircuitAuthorizationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def delete(
self, resource_group_name, circuit_name, authorization_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified authorization from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
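        # The three nested functions below implement the long-running-operation
        # (LRO) pattern of this SDK generation: AzureOperationPoller calls
        # long_running_send() once, polls get_long_running_status() until the
        # operation reaches a terminal state, then maps the final response
        # through get_long_running_output().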
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, circuit_name, authorization_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified authorization from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, circuit_name, authorization_name, authorization_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an authorization in the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or
update express route circuit authorization operation.
:type authorization_parameters:
:class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ExpressRouteCircuitAuthorizationPaged
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorizationPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteCircuitAuthorizationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteCircuitAuthorizationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
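
# Hedged usage sketch (not part of the generated file; resource names are
# hypothetical). These operations are normally reached through a service
# client's express_route_circuit_authorizations attribute:
#
#   client = NetworkManagementClient(credentials, subscription_id)
#   poller = client.express_route_circuit_authorizations.create_or_update(
#       'my-rg', 'my-circuit', 'my-auth',
#       models.ExpressRouteCircuitAuthorization())
#   auth = poller.result()  # blocks until the LRO completes
#   for a in client.express_route_circuit_authorizations.list('my-rg', 'my-circuit'):
#       print(a.name)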
|
|
#!/usr/bin/python
import re
import time,logging
import argparse, string,datetime,sys
import afs
from afs.util.AfsConfig import parseDefaultConfig
from afs.util.afsutil import parseHumanWriteableSize
from afs.service.OSDVolService import OSDVolService
from afs.service.OSDCellService import OSDCellService
from afs.service.OSDFsService import OSDFsService
from afs.service.ProjectService import ProjectService
from afs.model.Volume import Volume
from afs.lla.VolumeLLA import VolumeLLA
from afs.lla.VLDbLLA import VLDbLLA
global FS,PS,VS,VD,VlD
myParser=argparse.ArgumentParser(parents=[afs.argParser], add_help=False)
myParser.add_argument("--ssrv", required=True, help="server to empty")
myParser.add_argument("--spart", help="partition on server to empty. defaults to all")
myParser.add_argument("--dsrv" , help="force server to be filled. Otherwise chosen automatically by project.")
myParser.add_argument("--dpart" , help="force partition on server to be filled. Otherwise chosen automatically by project.")
group = myParser.add_mutually_exclusive_group()
group.add_argument("--ignorerx", dest="ignoreRX", action="append", help="regEx for volumenames to ignore. All volumes not matching this will be moved.")
group.add_argument("--onlyrx", dest="onlyRX", action="append", help="regEx for volumenames to include all volumes not matching this will be ignored.")
group.add_argument("--ignoreproject", dest="ignoreProjects", action="append", help="ignore volumes of given project.")
group.add_argument("--onlyproject", dest="onlyProjects", action="append", help="only move volumes of given project.")
myParser.add_argument("--dryrun",action="store_true", help="Just print out what would be done, but don't do it.")
myParser.add_argument("--maxnum", default = 0, type=int, help="max number of Volumes to move.")
myParser.add_argument("--untilfree", default = "0", help="move until # is free on spart.")
myParser.add_argument("--rwvols", dest="moveRWVols", default=False, action="store_true", help="move rwvols with their accompanying ROs.")
myParser.add_argument("--solitaryrovols", dest="moveSolitaryROVols", default=False, action="store_true", help="move solitary rovols.")
myParser.add_argument("--minsize", dest="minVolumeUsage", default="0", help="only move volumes with minimalsize of")
myParser.add_argument("--osdvolumes", dest="moveOSDVOlumes", default=False, action="store_true", help="also move OSD-Volumes")
parseDefaultConfig(myParser)
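# Example invocations (script and server names are hypothetical):
#   ./emptyserver.py --ssrv afs01 --spart a --rwvols --untilfree 200g --dryrun
#   ./emptyserver.py --ssrv afs01 --dsrv afs02 --solitaryrovols --maxnum 10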
FS=OSDFsService()
PS=ProjectService()
VS=OSDVolService()
VD=VolumeLLA()
VlD=VLDbLLA()
if not afs.defaultConfig.moveRWVols and not afs.defaultConfig.moveSolitaryROVols :
sys.stderr.write("If you want to nmake me do anything, specify --rwvols and/or --solitaryrovols\n")
sys.exit(1)
if afs.defaultConfig.ignoreRX != None :
ignoreRX=[]
for rx in afs.defaultConfig.ignoreRX :
try :
ignoreRX.append(re.compile(rx))
except :
sys.stderr.write("Cannot compile regular expression: '%s'\n" % rx)
sys.exit(1)
elif afs.defaultConfig.onlyRX != None :
onlyRX=[]
for rx in afs.defaultConfig.onlyRX :
try :
onlyRX.append(re.compile(rx))
except :
sys.stderr.write("Cannot compile regular expression: '%s'\n" % rx)
sys.exit(1)
srcFS=FS.getFileServer(afs.defaultConfig.ssrv)
if srcFS == None :
sys.stderr.write("src server %s does not exist." % srcFS)
sys.exit(2)
if afs.defaultConfig.spart != None :
if not afs.defaultConfig.spart in srcFS.parts :
sys.stderr.write("Partition %s does not exist on server %s\n" % (afs.defaultConfig.spart, afs.defaultConfig.ssrv))
sys.exit(2)
    # we cycle through this list later
srcParts=[afs.defaultConfig.spart]
else :
srcParts=srcFS.parts
if afs.defaultConfig.dsrv != None :
dstFS=FS.getFileServer(afs.defaultConfig.dsrv)
if dstFS == None :
sys.stderr.write("destination server %s does not exist.\n" % srcFS)
sys.exit(2)
if afs.defaultConfig.dpart != None :
if not afs.defaultConfig.dpart in dstFS.parts :
sys.stderr.write("Partition %s does not exist on server %s\n" % (afs.defaultConfig.dpart, afs.defaultConfig.dsrv))
sys.exit(2)
else :
reqDstPart=afs.defaultConfig.dpart
else :
reqDstPart=None
else :
dstFS=None
if afs.defaultConfig.dpart != None :
sys.stderr.write("Warning: ignoring given dpart=%s, because no dsrv has been specified.\n" % afs.defaultConfig.dpart)
# XXX we should handle everything internally with bytes
untilFree=parseHumanWriteableSize(afs.defaultConfig.untilfree)/1024
minUsage=parseHumanWriteableSize(afs.defaultConfig.minVolumeUsage)/1024
VolObj = Volume()
movedVolcount=0
for srcP in srcParts :
print "Processing Partition %s...." % srcP
# check if partition is freed enough
parts=FS.getPartitions(afs.defaultConfig.ssrv)
if parts[srcP]["free"] > untilFree and untilFree > 0 :
print "already %s Bytes free on spart %s." % (afs.util.afsutil.humanReadableSize(parts[srcP]["free"]),srcP )
continue
# get list of volumes to move
srcVolList=FS.getVolList(srcFS.servernames[0],srcP,cached=False)
# get RW Volumes :
RWVols=[]
solitaryROVols=[]
for v in srcVolList :
if "type" in v.keys() :
if v["type"] == "RW" : RWVols.append(v)
for v in srcVolList :
if "type" in v.keys() :
isSolitary = True
if v["type"] == "RO" :
for rw in RWVols :
if rw["name"] == v["name"][:-9] :
isSolitary = False
if isSolitary : solitaryROVols.append(v)
if afs.defaultConfig.moveRWVols :
for v in RWVols :
# check for name with given regex, these checks are mutually exclusive.
skip_it=False
if afs.defaultConfig.ignoreRX != None :
skip_it=False
for rx in ignoreRX :
if rx.match(v["name"]) : skip_it = True
elif afs.defaultConfig.onlyRX != None :
skip_it=True
for rx in onlyRX :
if rx.match(v["name"]) : skip_it = False
elif afs.defaultConfig.onlyProjects != None :
skip_it=True
volProjects=PS.getProjectsByVolumeName(v["name"])
for prj in volProjects :
if prj.name in afs.defaultConfig.onlyProjects : skip_it = False
elif afs.defaultConfig.ignoreProjects != None :
skip_it=False
volProjects=PS.getProjectsByVolumeName(v["name"])
for prj in volProjects :
if prj.name in afs.defaultConfig.ignoreProjects : skip_it = True
if skip_it : continue
# check for moving osd-volumes
if v.get("osdPolicy",0) != 0 :
if not afs.defaultConfig.moveOSDVOlumes :
print "Skipping %s, because it is an OSD-Volume" % v["name"]
continue
# check for minSize
if minUsage != 0 :
if int(v.get("diskused")) < minUsage :
print "Skipping %s, because it is smaller than %s" % (v["name"], minUsage)
continue
# remove osd-attributes from dict, create Obj
v.pop("filequota")
v.pop("osdPolicy")
v["serv_uuid"]=afs.LookupUtil[afs.defaultConfig.CELL_NAME].getFSUUID(v["servername"])
VolObj.setByDict(v)
if dstFS == None :
            minNestingLevel=-1 # NestingLevel should be "specificity"
for p in PS.getProjectsByVolumeName(v["name"]) :
if p.NestingLevel < minNestingLevel or minNestingLevel == -1 :
minNestingLevel = p.NestingLevel
PrjObj=p
dstSrv,dstP = PS.getNewVolumeLocation(PrjObj.name,VolObj)
if dstSrv == None :
sys.stderr.write("found no appropriate location for %s, part of project %s. Skipping\n" % (VolObj.name,PrjObj.name))
continue
else :
dstSrv = dstFS.servernames[0]
parts=FS.getPartitions(dstSrv)
if reqDstPart == None :
maxFree=0
for p in parts :
if parts[p]["free"] > maxFree :
dstP=p
maxFree=parts[p]["free"]
else :
dstP=reqDstPart
print "moving volume %s from %s %s to %s %s" % (v["name"],srcFS.servernames[0],srcP,dstSrv,dstP)
if not afs.defaultConfig.dryrun :
VD.move(v["name"],srcFS.servernames[0],srcP,dstSrv,dstP)
try :
# add RO to dstSrv if there is none yet.
hasRO=False
for ov in VS.getVolume("%s.readonly" % VolObj.name, cached=False) :
if ov.servername == dstSrv and ov.part == dstP :
hasRO = True
                if hasRO : # destination already has the RO; find a second server to move it to
dstSrv,dstP = PS.getNewVolumeLocation(PrjObj.name,ov)
if dstSrv == None :
sys.stderr.write("found no appropriate location for %s. Skipping\n" % VolObj.name)
continue
except : # there is no RO, so just skip this.
continue
print "Moving accompanying RO to %s %s" % (dstSrv,dstP)
if not afs.defaultConfig.dryrun :
VlD.addsite(v["name"],dstSrv,dstP)
VD.release(v["name"])
# only remove accompanying RO from srcSRV if we are sure there is one!
for ov in VS.getVolume("%s.readonly" % VolObj.name, cached=False) :
if ov.servername == srcFS.servernames[0] and ov.part == srcP :
VD.remove("%s.readonly" % v["name"],srcFS.servernames[0],srcP)
break
movedVolcount += 1
            if movedVolcount >= afs.defaultConfig.maxnum and afs.defaultConfig.maxnum > 0 :
                print "moved %d volumes. Terminating." % movedVolcount
sys.exit(0)
# check if partition is freed enough
parts=FS.getPartitions(afs.defaultConfig.ssrv)
if parts[srcP]["free"] > untilFree and untilFree > 0 :
print "%s bytes free on spart %s." % (afs.util.afsutil.humanReadableSize(parts[srcP]["free"]),srcP )
break
if afs.defaultConfig.moveSolitaryROVols :
for v in solitaryROVols :
# get RWVolName
RWVolName=v['name'][:-len(".readonly")]
# check for moving osd-volumes
if v.get("osdPolicy",0) != 0 :
if not afs.defaultConfig.moveOSDVOlumes :
print "Skipping %s, because it is an OSD-Volume" % v["name"]
continue
# check for minSize
if minUsage != 0 :
if int(v.get("diskused")) < minUsage :
print "Skipping %s, because it is smaller than %s" % (v["name"], minUsage)
continue
if afs.defaultConfig.ignoreRX != None :
skip_it=False
for rx in ignoreRX :
if rx.match(RWVolName) : skip_it = True
elif afs.defaultConfig.onlyRX != None :
skip_it=True
for rx in onlyRX :
if rx.match(RWVolName) : skip_it = False
elif afs.defaultConfig.onlyProjects != None :
skip_it=True
volProjects=PS.getProjectsByVolumeName(RWVolName)
for prj in volProjects :
if prj.name in afs.defaultConfig.onlyProjects : skip_it = False
elif afs.defaultConfig.ignoreProjects != None :
skip_it=False
volProjects=PS.getProjectsByVolumeName(RWVolName)
for prj in volProjects :
if prj.name in afs.defaultConfig.ignoreProjects : skip_it = True
else :
skip_it = False
if skip_it : continue
# remove osd-attributes from dict, create object
v.pop("filequota")
v.pop("osdPolicy")
v["serv_uuid"]=afs.LookupUtil[afs.defaultConfig.CELL_NAME].getFSUUID(v["servername"])
VolObj.setByDict(v)
if dstFS == None :
# get actual stuff from live
#Vol=VS.getVolume(v["name"],cached=False)
            minNestingLevel=-1 # NestingLevel should be "specificity"
for p in PS.getProjectsByVolumeName(RWVolName) :
if p.NestingLevel < minNestingLevel or minNestingLevel == -1 :
minNestingLevel = p.NestingLevel
PrjObj=p
dstSrv,dstP = PS.getNewVolumeLocation(PrjObj.name,VolObj)
if dstSrv == None :
sys.stderr.write("found no appropriate location for %s. Skipping\n" % VolObj.name)
continue
else :
dstSrv = dstFS.servernames[0]
if reqDstPart == None :
dstP=None
maxFree=0
parts=FS.getPartitions(dstSrv)
for p in parts :
if parts[p]["free"] > maxFree :
dstP=p
maxFree=parts[p]["free"]
if dstP == None :
sys.stderr.write("found no appropriate partition on server %s for %s. Skipping\n" % (dstSrv,VolObj.name))
continue
else :
# check if requested destination is ok
dstP=reqDstPart
skip_it= False
for rov in VS.getVolGroup(VolObj.name,cached=False)["RO"] :
if rov.serv_uuid == dstFS.uuid :
print "Found one existing RO-copy of %s on server %s. Skipping." % (VolObj.name,dstSrv)
skip_it = True
if skip_it : continue
# XXX only move RO, if we don't have enough of them -> auto-healing
# get RO-Sites of the Volume :
            # the most specific project for this volume counts
print "Moving solitary RO %s to %s %s" % (v['name'],dstSrv,dstP)
if not afs.defaultConfig.dryrun :
VlD.addsite(RWVolName,dstSrv,dstP)
VD.release(RWVolName)
VD.remove(v['name'],srcFS.servernames[0],srcP)
movedVolcount += 1
            if movedVolcount >= afs.defaultConfig.maxnum and afs.defaultConfig.maxnum > 0 :
                print "moved %d volumes. Terminating." % movedVolcount
sys.exit(0)
# check if partition is freed enough
parts=FS.getPartitions(afs.defaultConfig.ssrv)
if parts[srcP]["free"] > untilFree and untilFree > 0 :
print "%s bytes free on spart %s." % (afs.util.afsutil.humanReadableSize(parts[srcP]["free"]),srcP )
break
|
|
#!/usr/bin/env python
#
# hdfhelp.py
#
# This code makes using odb with Clearsilver as "easy as stealing candy
# from a baby". - jeske
#
# How to use:
#
# rows = tbl.fetchAllRows()
# rows.hdfExport("CGI.rows", hdf_dataset)
#
# row = tbl.fetchRow( ('primary_key', value) )
# row.hdfExport("CGI.row", hdf_dataset)
#
# How to setup:
#
# # define table
# class AgentsTable(odb.Table):
# def _defineRows(self):
# self.d_addColumn("agent_id",kInteger,None,primarykey = 1,autoincrement = 1)
# self.d_addColumn("login",kVarString,200,notnull=1)
# self.d_addColumn("ticket_count",kIncInteger,None)
#
# # make sure you return a subclass of hdfhelp.HdfRow
#
# def defaultRowClass(self):
# return hdfhelp.HdfRow
# def defaultRowListClass(self):
# return hdfhelp.HdfItemList
#
import string, os
import neo_cgi
import neo_cs
import neo_util
import odb
import time
import UserList
SECS_IN_MIN = 60
SECS_IN_HOUR = (SECS_IN_MIN * 60)
SECS_IN_DAY = (SECS_IN_HOUR * 24)
SECS_IN_WEEK = (SECS_IN_DAY * 7)
SECS_IN_MONTH = (SECS_IN_DAY * 30)
kYearPos = 0
kMonthPos = 1
kDayPos = 2
kHourPos = 3
kMinutePos = 4
kSecondPos = 5
kWeekdayPos = 6
kJulianDayPos = 7
kDSTPos = 8
def renderDate(then_time,day=0):
if then_time is None:
then_time = 0
then_time = int(then_time)
if then_time == 0 or then_time == -1:
return ""
then_tuple = time.localtime(then_time)
now_tuple = time.localtime(time.time())
if day or (then_tuple[kHourPos]==0 and then_tuple[kMinutePos]==0 and then_tuple[kSecondPos]==0):
# it's just a date
if then_tuple[kYearPos] == now_tuple[kYearPos]:
# no year
return time.strftime("%m/%d",then_tuple)
else:
# add year
return time.strftime("%m/%d/%Y",then_tuple)
else:
# it's a full time/date
return time.strftime("%m/%d/%Y %I:%M%p",then_tuple)
class HdfRow(odb.Row):
def hdfExport(self, prefix, hdf_dataset, *extra, **extranamed):
skip_fields = extranamed.get("skip_fields", None)
translate_dict = extranamed.get("translate_dict", None)
tz = extranamed.get("tz", "US/Pacific")
for col_name,value in self.items():
if skip_fields and (col_name in skip_fields):
continue
try:
name,col_type,col_options = self._table.getColumnDef(col_name)
except:
col_type = odb.kVarString
col_options = {}
if (value is not None):
if col_options.get("no_export",0): continue
if type(value) in [ type(0), type(0L) ]:
hdf_dataset.setValue(prefix + "." + col_name,"%d" % value)
elif type(value) == type(1.0):
if int(value) == value:
hdf_dataset.setValue(prefix + "." + col_name,"%d" % value)
else:
hdf_dataset.setValue(prefix + "." + col_name,"%0.2f" % value)
else:
if col_type == odb.kReal:
log("why are we here with this value: %s" % value)
if translate_dict:
for k,v in translate_dict.items():
value = string.replace(value,k,v)
hdf_dataset.setValue(prefix + "." + col_name,neo_cgi.htmlEscape(str(value)))
if col_options.get("int_date",0):
hdf_dataset.setValue(prefix + "." + col_name + ".string",renderDate(value))
hdf_dataset.setValue(prefix + "." + col_name + ".day_string",renderDate(value,day=1))
if value: neo_cgi.exportDate(hdf_dataset, "%s.%s" % (prefix, col_name), tz, value)
if col_options.has_key("enum_values"):
enum = col_options["enum_values"]
hdf_dataset.setValue(prefix + "." + col_name + ".enum",
str(enum.get(value,'')))
class HdfItemList(UserList.UserList):
def hdfExport(self,prefix,hdf_dataset,*extra,**extranamed):
export_by = extranamed.get("export_by", None)
n = 0
for row in self:
if export_by is not None:
n = row[export_by]
row.hdfExport("%s.%d" % (prefix,n),hdf_dataset,*extra,**extranamed)
n = n + 1
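# hdfExport() on a row list numbers rows 0..n-1 under the prefix; with the
# export_by keyword, that column's value becomes the HDF key instead
# (column name below is hypothetical):
#   rows.hdfExport("CGI.rows", hdf)                        # CGI.rows.0, CGI.rows.1, ...
#   rows.hdfExport("CGI.rows", hdf, export_by="agent_id")  # CGI.rows.<agent_id>, ...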
def setList(hdf, prefix, lst):
hdf.setValue(prefix+".0", str(len(lst)))
for n in range(len(lst)):
hdf.setValue(prefix+".%d" %(n+1), lst[n]);
def getList(hdf, name):
lst = []
for n in range(hdf.getIntValue(name,0)):
lst.append(hdf.getValue(name+".%d" %(n+1), ""))
return lst
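# setList()/getList() store a Python list in HDF with the length at index 0 and
# the items at 1..n, so the two functions round-trip:
#   setList(hdf, "Query.tags", ["a", "b"])  # Query.tags.0="2", .1="a", .2="b"
#   getList(hdf, "Query.tags")              # -> ["a", "b"]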
def eval_cs(hdf,a_cs_string):
cs = neo_cs.CS(hdf)
try:
cs.parseStr(a_cs_string)
return cs.render()
except:
return "Error in CS tags: %s" % neo_cgi.htmlEscape(repr(a_cs_string))
def childloop(hdf):
children = []
if hdf:
hdf = hdf.child()
while hdf:
children.append(hdf)
hdf = hdf.next()
return children
# ----------------------------
class HDF_Database(odb.Database):
def defaultRowClass(self):
return HdfRow
def defaultRowListClass(self):
return HdfItemList
# ----------------------------
def loopHDF(hdf, name=None):
results = []
if name: o = hdf.getObj(name)
else: o = hdf
if o:
o = o.child()
while o:
results.append(o)
o = o.next()
return results
def loopKVHDF(hdf, name=None):
results = []
if name: o = hdf.getObj(name)
else: o = hdf
if o:
o = o.child()
while o:
results.append((o.name(), o.value()))
o = o.next()
return results
class hdf_iterator:
def __init__(self, hdf):
self.hdf = hdf
self.node = None
if self.hdf:
self.node = self.hdf.child()
def __iter__(self): return self
def next(self):
if not self.node:
raise StopIteration
ret = self.node
self.node = self.node.next()
return ret
class hdf_kv_iterator(hdf_iterator):
def next(self):
if not self.node: raise StopIteration
ret = (self.node.name(), self.node.value())
self.node = self.node.next()
return ret
class hdf_key_iterator(hdf_iterator):
def next(self):
if not self.node: raise StopIteration
ret = self.node.name()
self.node = self.node.next()
return ret
class hdf_ko_iterator(hdf_iterator):
def next(self):
if not self.node: raise StopIteration
ret = (self.node.name(), self.node)
self.node = self.node.next()
return ret
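# The iterator helpers above walk an HDF node's immediate children, e.g.:
#   for name, value in hdf_kv_iterator(hdf.getObj("CGI.rows")):
#       print name, value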
# ----------------------------
def test():
import neo_util
hdf = neo_util.HDF()
hdf.setValue("foo","1")
print eval_cs(hdf,"this should say 1 ===> <?cs var:foo ?>")
if __name__ == "__main__":
test()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import httplib2 as http
import httplib
import re
from urlparse import urlparse
import pprint
import urllib2
class streamscrobbler:
    def parse_headers(self, response):
        headers = {}
        count = 0  # renamed from "int", which shadowed the builtin
        while True:
            line = response.readline()
            if line == '\r\n':
                break  # end of headers
            if ':' in line:
                key, value = line.split(':', 1)
                headers[key] = value.rstrip()
            if count == 12:
                break
            count = count + 1
        return headers
    # this is the function you should call with the url; it returns all data sorted as an object
def getServerInfo(self, url):
        print "shoutcast check v.2"
if url.endswith('.pls') or url.endswith('listen.pls?sid=1'):
address = self.checkPLS(url)
else:
address = url
if isinstance(address, str):
meta_interval = self.getAllData(address)
else:
meta_interval = {"status": 0, "metadata": None}
return meta_interval
def getAllData(self, address):
shoutcast = False
status = 0
request = urllib2.Request(address)
user_agent = 'iTunes/9.1.1'
request.add_header('User-Agent', user_agent)
request.add_header('icy-metadata', 1)
try:
response = urllib2.urlopen(request, timeout=6)
headers = self.getHeaders(response)
pp = pprint.PrettyPrinter(indent=4)
print "parse headers: "
pp.pprint(headers)
if "server" in headers:
shoutcast = headers['server']
elif "X-Powered-By" in headers:
shoutcast = headers['X-Powered-By']
elif "icy-notice1" in headers:
shoutcast = headers['icy-notice2']
else:
shoutcast = bool(1)
if isinstance(shoutcast, bool):
if shoutcast is True:
status = 1
else:
status = 0
                metadata = False
elif "SHOUTcast" in shoutcast:
status = 1
metadata = self.shoutcastCheck(response, headers, False)
elif "Icecast" or "137" in shoutcast:
status = 1
metadata = self.shoutcastCheck(response, headers, True)
elif "StreamMachine" in shoutcast:
status = 1
metadata = self.shoutcastCheck(response, headers, True)
elif shoutcast is not None:
status = 1
metadata = self.shoutcastCheck(response, headers, True)
else:
metadata = False
response.close()
return {"status": status, "metadata": metadata}
except urllib2.HTTPError, e:
print ' Error, HTTPError = ' + str(e.code)
return {"status": status, "metadata": None}
except urllib2.URLError, e:
print " Error, URLError: " + str(e.reason)
return {"status": status, "metadata": None}
except Exception, err:
print " Error: " + str(err)
return {"status": status, "metadata": None}
def checkPLS(self, address):
try:
response = urllib2.urlopen(address, timeout=2)
for line in response:
if line.startswith("File1="):
                    stream = line
response.close()
if 'stream' in locals():
return stream[6:]
else:
return bool(0)
except Exception:
return bool(0)
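    # A .pls playlist typically looks like:
    #   [playlist]
    #   File1=http://example.com:8000/stream
    # checkPLS() returns the URL following "File1=", or False if unavailable.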
    def shoutcastCheck(self, response, headers, itsOld):
        contenttype = None  # default, so the return below cannot hit an undefined name
        if itsOld is not True:
if 'icy-br' in headers:
bitrate = headers['icy-br']
bitrate = bitrate.rstrip()
else:
bitrate = None
if 'icy-metaint' in headers:
icy_metaint_header = headers['icy-metaint']
else:
icy_metaint_header = None
if "Content-Type" in headers:
contenttype = headers['Content-Type']
elif 'content-type' in headers:
contenttype = headers['content-type']
else:
if 'icy-br' in headers:
bitrate = headers['icy-br'].split(",")[0]
else:
bitrate = None
if 'icy-metaint' in headers:
icy_metaint_header = headers['icy-metaint']
else:
icy_metaint_header = None
if headers.get('Content-Type') is not None:
contenttype = headers.get('Content-Type')
elif headers.get('content-type') is not None:
contenttype = headers.get('content-type')
if icy_metaint_header is not None:
metaint = int(icy_metaint_header)
print "icy metaint: " + str(metaint)
read_buffer = metaint + 255
content = response.read(read_buffer)
start = "StreamTitle='"
end = "';"
try:
title = re.search('%s(.*)%s' % (start, end), content[metaint:]).group(1)
title = re.sub("StreamUrl='.*?';", "", title).replace("';", "").replace("StreamUrl='", "")
title = re.sub("&artist=.*", "", title)
title = re.sub("http://.*", "", title)
                title = title.rstrip()
except Exception, err:
print "songtitle error: " + str(err)
title = content[metaint:].split("'")[1]
            return {'song': title, 'bitrate': bitrate, 'contenttype': contenttype.rstrip() if contenttype else None}
else:
            print "No metaint"
return False
def getHeaders(self, response):
if self.is_empty(response.headers.dict) is False:
headers = response.headers.dict
elif hasattr(response.info(),"item") and self.is_empty(response.info().item()) is False:
headers = response.info().item()
else:
headers = self.parse_headers(response)
return headers
def is_empty(self, any_structure):
if any_structure:
return False
else:
return True
def stripTags(self, text):
finished = 0
while not finished:
finished = 1
start = text.find("<")
if start >= 0:
stop = text[start:].find(">")
if stop >= 0:
text = text[:start] + text[start + stop + 1:]
finished = 0
return text
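
# Hypothetical usage (stream URL assumed):
#   scrobbler = streamscrobbler()
#   info = scrobbler.getServerInfo("http://example.com:8000/stream")
#   # -> {'status': 1, 'metadata': {'song': ..., 'bitrate': ..., 'contenttype': ...}}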
|
|
# -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import pytest
from warnings import catch_warnings
import numpy as np
from pandas.core.dtypes.common import is_float_dtype
from pandas import Series, Index, isnull, notnull
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.series import remove_na
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
pytest.skip("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
expected = obj.apply(wrapper, axis=i)
tm.assert_panel_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
expected = obj.apply(skipna_wrapper, axis=i)
tm.assert_panel_equal(result, expected)
pytest.raises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
def test_get_axis(self):
assert self.panel4d._get_axis(0) is self.panel4d.labels
assert self.panel4d._get_axis(1) is self.panel4d.items
assert self.panel4d._get_axis(2) is self.panel4d.major_axis
assert self.panel4d._get_axis(3) is self.panel4d.minor_axis
def test_set_axis(self):
with catch_warnings(record=True):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
assert 'l1' not in self.panel4d._item_cache
assert self.panel4d.labels is new_labels
self.panel4d.major_axis = new_major
assert self.panel4d[0].major_axis is new_major
assert self.panel4d.major_axis is new_major
self.panel4d.minor_axis = new_minor
assert self.panel4d[0].minor_axis is new_minor
assert self.panel4d.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel4d._get_axis_number('labels') == 0
assert self.panel4d._get_axis_number('items') == 1
assert self.panel4d._get_axis_number('major') == 2
assert self.panel4d._get_axis_number('minor') == 3
def test_get_axis_name(self):
assert self.panel4d._get_axis_name(0) == 'labels'
assert self.panel4d._get_axis_name(1) == 'items'
assert self.panel4d._get_axis_name(2) == 'major_axis'
assert self.panel4d._get_axis_name(3) == 'minor_axis'
def test_arith(self):
with catch_warnings(record=True):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
pytest.raises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
tm.assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
assert (len(list(self.panel4d.iteritems())) ==
len(self.panel4d.labels))
def test_combinePanel4d(self):
with catch_warnings(record=True):
result = self.panel4d.add(self.panel4d)
tm.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with catch_warnings(record=True):
tm.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with catch_warnings(record=True):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
tm.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
tm.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15),
axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
tm.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
tm.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
tm.assert_panel4d_equal(result, p.reindex(items=[]))
    def test_get_value(self):
        # this previously indexed a nonexistent ``self.panel``; use the 4D object
        for label in self.panel4d.labels:
            for item in self.panel4d.items:
                for mjr in self.panel4d.major_axis[::2]:
                    for mnr in self.panel4d.minor_axis:
                        result = self.panel4d.get_value(
                            label, item, mjr, mnr)
                        expected = self.panel4d[label][item][mnr][mjr]
                        assert_almost_equal(result, expected)
def test_abs(self):
with catch_warnings(record=True):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
tm.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
tm.assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with catch_warnings(record=True):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
tm.assert_panel_equal(expected, result)
assert 'l2' not in self.panel4d.labels
del self.panel4d['l3']
assert 'l3' not in self.panel4d.labels
pytest.raises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4),
lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[1]
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[2]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[3]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[0], panel4d[0])
def test_setitem(self):
with catch_warnings(record=True):
# Panel
p = Panel(dict(
ItemA=self.panel4d['l1']['ItemA'][2:].filter(
items=['A', 'B'])))
self.panel4d['l4'] = p
self.panel4d['l5'] = p
p2 = self.panel4d['l4']
tm.assert_panel_equal(p, p2.reindex(items=p.items,
major_axis=p.major_axis,
minor_axis=p.minor_axis))
# scalar
self.panel4d['lG'] = 1
self.panel4d['lE'] = True
assert self.panel4d['lG'].values.dtype == np.int64
assert self.panel4d['lE'].values.dtype == np.bool_
# object dtype
self.panel4d['lQ'] = 'foo'
assert self.panel4d['lQ'].values.dtype == np.object_
# boolean dtype
self.panel4d['lP'] = self.panel4d['l1'] > 0
assert self.panel4d['lP'].values.dtype == np.bool_
def test_setitem_by_indexer(self):
with catch_warnings(record=True):
# Panel
panel4dc = self.panel4d.copy()
p = panel4dc.iloc[0]
def func():
self.panel4d.iloc[0] = p
pytest.raises(NotImplementedError, func)
# DataFrame
panel4dc = self.panel4d.copy()
df = panel4dc.iloc[0, 0]
df.iloc[:] = 1
panel4dc.iloc[0, 0] = df
assert (panel4dc.iloc[0, 0].values == 1).all()
# Series
panel4dc = self.panel4d.copy()
s = panel4dc.iloc[0, 0, :, 0]
s.iloc[:] = 1
panel4dc.iloc[0, 0, :, 0] = s
assert (panel4dc.iloc[0, 0, :, 0].values == 1).all()
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
assert (panel4dc.iloc[0].values == 1).all()
assert panel4dc.iloc[1].values.all()
assert (panel4dc.iloc[2].values == 'foo').all()
def test_setitem_by_indexer_mixed_type(self):
with catch_warnings(record=True):
# GH 8702
self.panel4d['foo'] = 'bar'
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
assert (panel4dc.iloc[0].values == 1).all()
assert panel4dc.iloc[1].values.all()
assert (panel4dc.iloc[2].values == 'foo').all()
def test_comparisons(self):
with catch_warnings(record=True):
p1 = tm.makePanel4D()
p2 = tm.makePanel4D()
tp = p1.reindex(labels=p1.labels.tolist() + ['foo'])
p = p1[p1.labels[0]]
def test_comp(func):
result = func(p1, p2)
tm.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
pytest.raises(Exception, func, p1, tp)
# versus different objs
pytest.raises(Exception, func, p1, p)
result3 = func(self.panel4d, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.panel4d.values, 0))
with np.errstate(invalid='ignore'):
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_major_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.major_axis[5]
with catch_warnings(record=True):
xs = self.panel4d.major_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'],
ref.xs(idx), check_names=False)
# not contained
idx = self.panel4d.major_axis[0] - BDay()
pytest.raises(Exception, self.panel4d.major_xs, idx)
def test_major_xs_mixed(self):
self.panel4d['l4'] = 'foo'
with catch_warnings(record=True):
xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
assert xs['l1']['A'].dtype == np.float64
assert xs['l4']['A'].dtype == np.object_
def test_minor_xs(self):
ref = self.panel4d['l1']['ItemA']
with catch_warnings(record=True):
idx = self.panel4d.minor_axis[1]
xs = self.panel4d.minor_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)
# not contained
pytest.raises(Exception, self.panel4d.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel4d['l4'] = 'foo'
with catch_warnings(record=True):
xs = self.panel4d.minor_xs('D')
assert xs['l1'].T['ItemA'].dtype == np.float64
assert xs['l4'].T['ItemA'].dtype == np.object_
def test_xs(self):
l1 = self.panel4d.xs('l1', axis=0)
expected = self.panel4d['l1']
tm.assert_panel_equal(l1, expected)
# View if possible
l1_view = self.panel4d.xs('l1', axis=0)
l1_view.values[:] = np.nan
assert np.isnan(self.panel4d['l1'].values).all()
# Mixed-type
self.panel4d['strings'] = 'foo'
with catch_warnings(record=True):
result = self.panel4d.xs('D', axis=3)
assert result.is_copy is not None
def test_getitem_fancy_labels(self):
with catch_warnings(record=True):
panel4d = self.panel4d
labels = panel4d.labels[[1, 0]]
items = panel4d.items[[1, 0]]
dates = panel4d.major_axis[::2]
cols = ['D', 'C', 'F']
# all 4 specified
tm.assert_panel4d_equal(panel4d.loc[labels, items, dates, cols],
panel4d.reindex(labels=labels, items=items,
major=dates, minor=cols))
# 3 specified
tm.assert_panel4d_equal(panel4d.loc[:, items, dates, cols],
panel4d.reindex(items=items, major=dates,
minor=cols))
# 2 specified
tm.assert_panel4d_equal(panel4d.loc[:, :, dates, cols],
panel4d.reindex(major=dates, minor=cols))
tm.assert_panel4d_equal(panel4d.loc[:, items, :, cols],
panel4d.reindex(items=items, minor=cols))
tm.assert_panel4d_equal(panel4d.loc[:, items, dates, :],
panel4d.reindex(items=items, major=dates))
# only 1
tm.assert_panel4d_equal(panel4d.loc[:, items, :, :],
panel4d.reindex(items=items))
tm.assert_panel4d_equal(panel4d.loc[:, :, dates, :],
panel4d.reindex(major=dates))
tm.assert_panel4d_equal(panel4d.loc[:, :, :, cols],
panel4d.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
pass
def test_get_value(self):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
result = self.panel4d.get_value(
label, item, mjr, mnr)
expected = self.panel4d[label][item][mnr][mjr]
assert_almost_equal(result, expected)
def test_set_value(self):
with catch_warnings(record=True):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
self.panel4d.set_value(label, item, mjr, mnr, 1.)
tm.assert_almost_equal(
self.panel4d[label][item][mnr][mjr], 1.)
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['l4'].values)
# resize
res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
assert isinstance(res, Panel4D)
assert res is not self.panel4d
assert res.get_value('l4', 'ItemE', 'foo', 'bar') == 1.5
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['l4'].values)
class TestPanel4d(CheckIndexing, SafeForSparse,
SafeForLongAndSparse):
def setup_method(self, method):
with catch_warnings(record=True):
self.panel4d = tm.makePanel4D(nper=8)
add_nans(self.panel4d)
def test_constructor(self):
with catch_warnings(record=True):
panel4d = Panel4D(self.panel4d._data)
assert panel4d._data is self.panel4d._data
panel4d = Panel4D(self.panel4d._data, copy=True)
assert panel4d._data is not self.panel4d._data
tm.assert_panel4d_equal(panel4d, self.panel4d)
vals = self.panel4d.values
# no copy
panel4d = Panel4D(vals)
assert panel4d.values is vals
# copy
panel4d = Panel4D(vals, copy=True)
assert panel4d.values is not vals
# GH #8285, test when scalar data is used to construct a Panel4D
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
panel4d = Panel4D(val, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5))
vals = np.empty((2, 3, 4, 5), dtype=dtype)
vals.fill(val)
expected = Panel4D(vals, dtype=dtype)
tm.assert_panel4d_equal(panel4d, expected)
# test the case when dtype is passed
panel4d = Panel4D(1, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5), dtype='float32')
vals = np.empty((2, 3, 4, 5), dtype='float32')
vals.fill(1)
expected = Panel4D(vals, dtype='float32')
tm.assert_panel4d_equal(panel4d, expected)
def test_constructor_cast(self):
with catch_warnings(record=True):
zero_filled = self.panel4d.fillna(0)
casted = Panel4D(zero_filled._data, dtype=int)
casted2 = Panel4D(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel4D(zero_filled._data, dtype=np.int32)
casted2 = Panel4D(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_consolidate(self):
with catch_warnings(record=True):
assert self.panel4d._data.is_consolidated()
self.panel4d['foo'] = 1.
assert not self.panel4d._data.is_consolidated()
panel4d = self.panel4d._consolidate()
assert panel4d._data.is_consolidated()
def test_ctor_dict(self):
with catch_warnings(record=True):
l1 = self.panel4d['l1']
l2 = self.panel4d['l2']
d = {'A': l1, 'B': l2.loc[['ItemB'], :, :]}
panel4d = Panel4D(d)
tm.assert_panel_equal(panel4d['A'], self.panel4d['l1'])
tm.assert_frame_equal(panel4d.loc['B', 'ItemB', :, :],
self.panel4d.loc['l2', ['ItemB'],
:, :]['ItemB'])
def test_constructor_dict_mixed(self):
with catch_warnings(record=True):
data = dict((k, v.values) for k, v in self.panel4d.iteritems())
result = Panel4D(data)
exp_major = Index(np.arange(len(self.panel4d.major_axis)))
tm.assert_index_equal(result.major_axis, exp_major)
result = Panel4D(data,
labels=self.panel4d.labels,
items=self.panel4d.items,
major_axis=self.panel4d.major_axis,
minor_axis=self.panel4d.minor_axis)
tm.assert_panel4d_equal(result, self.panel4d)
data['l2'] = self.panel4d['l2']
result = Panel4D(data)
tm.assert_panel4d_equal(result, self.panel4d)
# corner, blow up
data['l2'] = data['l2']['ItemB']
pytest.raises(Exception, Panel4D, data)
data['l2'] = self.panel4d['l2'].values[:, :, :-1]
pytest.raises(Exception, Panel4D, data)
def test_constructor_resize(self):
with catch_warnings(record=True):
data = self.panel4d._data
labels = self.panel4d.labels[:-1]
items = self.panel4d.items[:-1]
major = self.panel4d.major_axis[:-1]
minor = self.panel4d.minor_axis[:-1]
result = Panel4D(data, labels=labels, items=items,
major_axis=major, minor_axis=minor)
expected = self.panel4d.reindex(
labels=labels, items=items, major=major, minor=minor)
tm.assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items, major_axis=major)
expected = self.panel4d.reindex(items=items, major=major)
tm.assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items)
expected = self.panel4d.reindex(items=items)
tm.assert_panel4d_equal(result, expected)
result = Panel4D(data, minor_axis=minor)
expected = self.panel4d.reindex(minor=minor)
tm.assert_panel4d_equal(result, expected)
def test_conform(self):
with catch_warnings(record=True):
p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
conformed = self.panel4d.conform(p)
tm.assert_index_equal(conformed.items, self.panel4d.labels)
tm.assert_index_equal(conformed.major_axis,
self.panel4d.major_axis)
tm.assert_index_equal(conformed.minor_axis,
self.panel4d.minor_axis)
def test_reindex(self):
with catch_warnings(record=True):
ref = self.panel4d['l2']
# labels
result = self.panel4d.reindex(labels=['l1', 'l2'])
tm.assert_panel_equal(result['l2'], ref)
# items
result = self.panel4d.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])
# major
new_major = list(self.panel4d.major_axis[:10])
result = self.panel4d.reindex(major=new_major)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))
# raise exception put both major and major_axis
pytest.raises(Exception, self.panel4d.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel4d.minor_axis[:2])
result = self.panel4d.reindex(minor=new_minor)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))
result = self.panel4d.reindex(labels=self.panel4d.labels,
items=self.panel4d.items,
major=self.panel4d.major_axis,
minor=self.panel4d.minor_axis)
# don't necessarily copy
result = self.panel4d.reindex()
tm.assert_panel4d_equal(result, self.panel4d)
assert result is not self.panel4d
# with filling
smaller_major = self.panel4d.major_axis[::5]
smaller = self.panel4d.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel4d.major_axis,
method='pad')
tm.assert_panel_equal(larger.loc[:, :,
self.panel4d.major_axis[1], :],
smaller.loc[:, :, smaller_major[0], :])
# don't necessarily copy
result = self.panel4d.reindex(
major=self.panel4d.major_axis, copy=False)
tm.assert_panel4d_equal(result, self.panel4d)
assert result is self.panel4d
def test_not_hashable(self):
with catch_warnings(record=True):
p4D_empty = Panel4D()
pytest.raises(TypeError, hash, p4D_empty)
pytest.raises(TypeError, hash, self.panel4d)
def test_reindex_like(self):
# reindex_like
with catch_warnings(record=True):
smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
items=self.panel4d.items[:-1],
major=self.panel4d.major_axis[:-1],
minor=self.panel4d.minor_axis[:-1])
smaller_like = self.panel4d.reindex_like(smaller)
tm.assert_panel4d_equal(smaller, smaller_like)
def test_sort_index(self):
with catch_warnings(record=True):
import random
rlabels = list(self.panel4d.labels)
ritems = list(self.panel4d.items)
rmajor = list(self.panel4d.major_axis)
rminor = list(self.panel4d.minor_axis)
random.shuffle(rlabels)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel4d.reindex(labels=rlabels)
sorted_panel4d = random_order.sort_index(axis=0)
tm.assert_panel4d_equal(sorted_panel4d, self.panel4d)
def test_fillna(self):
with catch_warnings(record=True):
assert not np.isfinite(self.panel4d.values).all()
filled = self.panel4d.fillna(0)
assert np.isfinite(filled.values).all()
pytest.raises(NotImplementedError,
self.panel4d.fillna, method='pad')
def test_swapaxes(self):
with catch_warnings(record=True):
result = self.panel4d.swapaxes('labels', 'items')
assert result.items is self.panel4d.labels
result = self.panel4d.swapaxes('labels', 'minor')
assert result.labels is self.panel4d.minor_axis
result = self.panel4d.swapaxes('items', 'minor')
assert result.items is self.panel4d.minor_axis
result = self.panel4d.swapaxes('items', 'major')
assert result.items is self.panel4d.major_axis
result = self.panel4d.swapaxes('major', 'minor')
assert result.major_axis is self.panel4d.minor_axis
# this should also work
result = self.panel4d.swapaxes(0, 1)
assert result.labels is self.panel4d.items
            # this works, but returns a copy
result = self.panel4d.swapaxes('items', 'items')
tm.assert_panel4d_equal(self.panel4d, result)
assert id(self.panel4d) != id(result)
def test_update(self):
with catch_warnings(record=True):
p4d = Panel4D([[[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
other = Panel4D([[[[3.6, 2., np.nan]],
[[np.nan, np.nan, 7]]]])
p4d.update(other)
expected = Panel4D([[[[3.6, 2, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
tm.assert_panel4d_equal(p4d, expected)
def test_dtypes(self):
result = self.panel4d.dtypes
expected = Series(np.dtype('float64'), index=self.panel4d.labels)
assert_series_equal(result, expected)
def test_repr_empty(self):
with catch_warnings(record=True):
empty = Panel4D()
repr(empty)
def test_rename(self):
with catch_warnings(record=True):
mapper = {'l1': 'foo',
'l2': 'bar',
'l3': 'baz'}
renamed = self.panel4d.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
tm.assert_index_equal(renamed.labels, exp)
renamed = self.panel4d.rename_axis(str.lower, axis=3)
exp = Index(['a', 'b', 'c', 'd'])
tm.assert_index_equal(renamed.minor_axis, exp)
# don't copy
renamed_nocopy = self.panel4d.rename_axis(mapper,
axis=0,
copy=False)
renamed_nocopy['foo'] = 3.
assert (self.panel4d['l1'].values == 3).all()
def test_get_attr(self):
tm.assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
# GH issue 15960
def test_sort_values(self):
pytest.raises(NotImplementedError, self.panel4d.sort_values)
pytest.raises(NotImplementedError, self.panel4d.sort_values, 'ItemA')
|
|
from otp.ai.AIBaseGlobal import *
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from otp.avatar import DistributedAvatarAI
import SuitTimings
from direct.task import Task
import SuitPlannerBase
import SuitBase
import SuitDialog
import SuitDNA
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import SuitBattleGlobals
from toontown.building import FADoorCodes
import DistributedSuitBaseAI
from toontown.hood import ZoneUtil
import random
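# SuitLeg (referenced below) is provided by the suit path classes in the full
# codebase; it is not imported in this snippet.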
class DistributedSuitAI(DistributedSuitBaseAI.DistributedSuitBaseAI):
SUIT_BUILDINGS = simbase.config.GetBool('want-suit-buildings', 1)
DEBUG_SUIT_POSITIONS = simbase.config.GetBool('debug-suit-positions', 0)
UPDATE_TIMESTAMP_INTERVAL = 180.0
myId = 0
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuitAI')
def __init__(self, air, suitPlanner):
DistributedSuitBaseAI.DistributedSuitBaseAI.__init__(self, air, suitPlanner)
self.bldgTrack = None
self.branchId = None
if suitPlanner:
self.branchId = suitPlanner.zoneId
self.pathEndpointStart = 0
self.pathEndpointEnd = 0
self.minPathLen = 0
self.maxPathLen = 0
self.pathPositionIndex = 0
self.pathPositionTimestamp = 0.0
self.pathState = 0
self.currentLeg = 0
self.legType = SuitLeg.TOff
self.flyInSuit = 0
self.buildingSuit = 0
self.attemptingTakeover = 0
self.buildingDestination = None
self.buildingDestinationIsCogdo = False
return
def stopTasks(self):
taskMgr.remove(self.taskName('flyAwayNow'))
taskMgr.remove(self.taskName('danceNowFlyAwayLater'))
taskMgr.remove(self.taskName('move'))
def pointInMyPath(self, point, elapsedTime):
if self.pathState != 1:
return 0
then = globalClock.getFrameTime() + elapsedTime
elapsed = then - self.pathStartTime
        return self.legList.isPointInRange(point, elapsed - self.sp.PATH_COLLISION_BUFFER, elapsed + self.sp.PATH_COLLISION_BUFFER)
def requestBattle(self, x, y, z, h, p, r):
toonId = self.air.getAvatarIdFromSender()
        if self.air.doId2do.get(toonId) is None:
return
if self.pathState == 3:
pass
elif self.pathState != 1:
if self.notify.getDebug():
self.notify.debug('requestBattle() - suit %d not on path' % self.getDoId())
if self.pathState == 2 or self.pathState == 4:
self.b_setBrushOff(SuitDialog.getBrushOffIndex(self.getStyleName()))
self.d_denyBattle(toonId)
return
elif self.legType != SuitLeg.TWalk:
if self.notify.getDebug():
self.notify.debug('requestBattle() - suit %d not in Bellicose' % self.getDoId())
self.b_setBrushOff(SuitDialog.getBrushOffIndex(self.getStyleName()))
self.d_denyBattle(toonId)
return
self.confrontPos = Point3(x, y, z)
self.confrontHpr = Vec3(h, p, r)
if self.sp.requestBattle(self.zoneId, self, toonId):
if self.notify.getDebug():
self.notify.debug('Suit %d requesting battle in zone %d' % (self.getDoId(), self.zoneId))
else:
if self.notify.getDebug():
self.notify.debug('requestBattle from suit %d - denied by battle manager' % self.getDoId())
self.b_setBrushOff(SuitDialog.getBrushOffIndex(self.getStyleName()))
self.d_denyBattle(toonId)
return
def getConfrontPosHpr(self):
return (self.confrontPos, self.confrontHpr)
def flyAwayNow(self):
self.b_setPathState(2)
self.stopPathNow()
name = self.taskName('flyAwayNow')
taskMgr.remove(name)
taskMgr.doMethodLater(SuitTimings.toSky, self.finishFlyAwayNow, name)
def danceNowFlyAwayLater(self):
self.b_setPathState(4)
self.stopPathNow()
name = self.taskName('danceNowFlyAwayLater')
taskMgr.remove(name)
taskMgr.doMethodLater(SuitTimings.victoryDance + SuitTimings.toSky, self.finishFlyAwayNow, name)
def finishFlyAwayNow(self, task):
self.notify.debug('Suit %s finishFlyAwayNow' % self.doId)
self.requestRemoval()
return Task.done
def d_setSPDoId(self, doId):
self.sendUpdate('setSPDoId', [doId])
def getSPDoId(self):
if self.sp:
return self.sp.getDoId()
else:
return 0
def releaseControl(self):
self.b_setPathState(0)
def b_setPathEndpoints(self, start, end, minPathLen, maxPathLen):
self.setPathEndpoints(start, end, minPathLen, maxPathLen)
self.d_setPathEndpoints(start, end, minPathLen, maxPathLen)
def d_setPathEndpoints(self, start, end, minPathLen, maxPathLen):
self.sendUpdate('setPathEndpoints', [start,
end,
minPathLen,
maxPathLen])
def setPathEndpoints(self, start, end, minPathLen, maxPathLen):
self.pathEndpointStart = start
self.pathEndpointEnd = end
self.minPathLen = minPathLen
self.maxPathLen = maxPathLen
def getPathEndpoints(self):
return (self.pathEndpointStart,
self.pathEndpointEnd,
self.minPathLen,
self.maxPathLen)
def b_setPathPosition(self, index, timestamp):
self.setPathPosition(index, timestamp)
self.d_setPathPosition(index, timestamp)
def d_setPathPosition(self, index, timestamp):
self.notify.debug('Suit %d reaches point %d at time %0.2f' % (self.getDoId(), index, timestamp))
self.sendUpdate('setPathPosition', [index, globalClockDelta.localToNetworkTime(timestamp)])
def setPathPosition(self, index, timestamp):
self.pathPositionIndex = index
self.pathPositionTimestamp = timestamp
def getPathPosition(self):
return (self.pathPositionIndex, globalClockDelta.localToNetworkTime(self.pathPositionTimestamp))
def b_setPathState(self, state):
self.setPathState(state)
self.d_setPathState(state)
def d_setPathState(self, state):
self.sendUpdate('setPathState', [state])
def setPathState(self, state):
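        # Path states (inferred from call sites): 0 = off path, 1 = walking
        # the path, 2 = flying away, 3 = in battle, 4 = victory dance.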
if self.pathState != state:
self.pathState = state
if state == 0:
self.stopPathNow()
elif state == 1:
self.moveToNextLeg(None)
elif state == 2:
self.stopPathNow()
elif state == 3:
pass
elif state == 4:
self.stopPathNow()
else:
self.notify.error('Invalid state: ' + str(state))
return
def getPathState(self):
return self.pathState
def d_debugSuitPosition(self, elapsed, currentLeg, x, y, timestamp):
timestamp = globalClockDelta.localToNetworkTime(timestamp)
self.sendUpdate('debugSuitPosition', [elapsed,
currentLeg,
x,
y,
timestamp])
def initializePath(self):
self.makeLegList()
if self.notify.getDebug():
self.notify.debug('Leg list:')
print self.legList
idx1 = self.startPoint.getIndex()
idx2 = self.endPoint.getIndex()
self.pathStartTime = globalClock.getFrameTime()
self.setPathEndpoints(idx1, idx2, self.minPathLen, self.maxPathLen)
self.setPathPosition(0, self.pathStartTime)
self.pathState = 1
self.currentLeg = 0
self.zoneId = ZoneUtil.getTrueZoneId(self.legList.getZoneId(0), self.branchId)
self.legType = self.legList.getType(0)
if self.notify.getDebug():
self.notify.debug('creating suit in zone %d' % self.zoneId)
def resync(self):
self.b_setPathPosition(self.currentLeg, self.pathStartTime + self.legList.getStartTime(self.currentLeg))
def moveToNextLeg(self, task):
now = globalClock.getFrameTime()
elapsed = now - self.pathStartTime
nextLeg = self.legList.getLegIndexAtTime(elapsed, self.currentLeg)
numLegs = self.legList.getNumLegs()
if self.currentLeg != nextLeg:
self.currentLeg = nextLeg
self.__beginLegType(self.legList.getType(nextLeg))
zoneId = self.legList.getZoneId(nextLeg)
zoneId = ZoneUtil.getTrueZoneId(zoneId, self.branchId)
self.__enterZone(zoneId)
self.notify.debug('Suit %d reached leg %d of %d in zone %d.' % (self.getDoId(),
nextLeg,
numLegs - 1,
self.zoneId))
if self.DEBUG_SUIT_POSITIONS:
leg = self.legList.getLeg(nextLeg)
pos = leg.getPosAtTime(elapsed - leg.getStartTime())
self.d_debugSuitPosition(elapsed, nextLeg, pos[0], pos[1], now)
if now - self.pathPositionTimestamp > self.UPDATE_TIMESTAMP_INTERVAL:
self.resync()
if self.pathState != 1:
return Task.done
nextLeg += 1
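        # Skip ahead past consecutive legs that keep the same zone and leg
        # type, so the move task only wakes up on real transitions.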
while nextLeg + 1 < numLegs and self.legList.getZoneId(nextLeg) == ZoneUtil.getCanonicalZoneId(self.zoneId) and self.legList.getType(nextLeg) == self.legType:
nextLeg += 1
if nextLeg < numLegs:
nextTime = self.legList.getStartTime(nextLeg)
delay = nextTime - elapsed
taskMgr.remove(self.taskName('move'))
taskMgr.doMethodLater(delay, self.moveToNextLeg, self.taskName('move'))
else:
if self.attemptingTakeover:
self.startTakeOver()
self.requestRemoval()
return Task.done
def stopPathNow(self):
taskMgr.remove(self.taskName('move'))
def __enterZone(self, zoneId):
if zoneId != self.zoneId:
self.sp.zoneChange(self, self.zoneId, zoneId)
self.air.sendSetZone(self, zoneId)
self.zoneId = zoneId
if self.pathState == 1:
self.sp.checkForBattle(zoneId, self)
def __beginLegType(self, legType):
self.legType = legType
if legType == SuitLeg.TWalkFromStreet:
self.checkBuildingState()
elif legType == SuitLeg.TToToonBuilding:
self.openToonDoor()
elif legType == SuitLeg.TToSuitBuilding:
self.openSuitDoor()
elif legType == SuitLeg.TToCoghq:
self.openCogHQDoor(1)
elif legType == SuitLeg.TFromCoghq:
self.openCogHQDoor(0)
def resume(self):
self.notify.debug('Suit %s resume' % self.doId)
if self.currHP <= 0:
self.notify.debug('Suit %s dead after resume' % self.doId)
self.requestRemoval()
else:
self.danceNowFlyAwayLater()
def prepareToJoinBattle(self):
self.b_setPathState(0)
def interruptMove(self):
SuitBase.SuitBase.interruptMove(self)
def checkBuildingState(self):
blockNumber = self.buildingDestination
        if blockNumber is None:
return
building = self.sp.buildingMgr.getBuilding(blockNumber)
if self.attemptingTakeover:
if not building.isToonBlock():
self.flyAwayNow()
return
if not hasattr(building, 'door'):
self.flyAwayNow()
return
building.door.setDoorLock(FADoorCodes.SUIT_APPROACHING)
elif not building.isSuitBlock():
self.flyAwayNow()
return
def openToonDoor(self):
blockNumber = self.buildingDestination
building = self.sp.buildingMgr.getBuilding(blockNumber)
if not building.isToonBlock():
self.flyAwayNow()
return
if not hasattr(building, 'door'):
self.flyAwayNow()
return
building.door.requestSuitEnter(self.getDoId())
def openSuitDoor(self):
blockNumber = self.buildingDestination
building = self.sp.buildingMgr.getBuilding(blockNumber)
if not building.isSuitBlock():
self.flyAwayNow()
return
def openCogHQDoor(self, enter):
blockNumber = self.legList.getBlockNumber(self.currentLeg)
try:
door = self.sp.cogHQDoors[blockNumber]
        except KeyError:
self.notify.error('No CogHQ door %s in zone %s' % (blockNumber, self.sp.zoneId))
return
if enter:
door.requestSuitEnter(self.getDoId())
else:
door.requestSuitExit(self.getDoId())
def startTakeOver(self):
if not self.SUIT_BUILDINGS:
return
blockNumber = self.buildingDestination
if not self.sp.buildingMgr.isSuitBlock(blockNumber):
self.notify.debug('Suit %d taking over building %d in %d' % (self.getDoId(), blockNumber, self.zoneId))
difficulty = self.getActualLevel() - 1
if self.buildingDestinationIsCogdo:
self.sp.cogdoTakeOver(blockNumber, difficulty, self.buildingHeight)
else:
dept = SuitDNA.getSuitDept(self.dna.name)
self.sp.suitTakeOver(blockNumber, dept, difficulty, self.buildingHeight)
|
|
#
# Copyright 2016 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from unittest import TestCase
import json
import logging
import os
import tempfile
import requests
from importio2.apicore import extractor_cancel
from importio2.apicore import extractor_csv
from importio2.apicore import extractor_get
from importio2.apicore import extractor_get_crawl_runs
from importio2.apicore import extractor_json
from importio2.apicore import extractor_log
from importio2.apicore import extractor_query
from importio2.apicore import extractor_start
from importio2.apicore import extractor_url_list_get
from importio2.apicore import extractor_url_list_put
from importio2.apicore import object_store_change_ownership
from importio2.apicore import object_store_create
from importio2.apicore import object_store_get
from importio2.apicore import object_store_put_attachment
from importio2.apicore import object_store_stream_attachment
from tests.unit.importio2.test_data import CrawlRunFilesDownloadTestData
from tests.unit.importio2.test_data import CrawlRunGet
from tests.unit.importio2.test_data import ExtractorCSVTestData
from tests.unit.importio2.test_data import ExtractorCrawlRunsTestData
from tests.unit.importio2.test_data import ExtractorJSONTestData
from tests.unit.importio2.test_data import ExtractorLogTestData
from tests.unit.importio2.test_data import ExtractorUrlListPutTestData
from tests.unit.importio2.test_data import ObjectStoreCrawlRunTestData
from tests.unit.importio2.test_data import ObjectStoreExtractorOwnership
from tests.unit.importio2.test_data import ObjectStoreExtractorPutCsvAttachment
from tests.unit.importio2.test_data import ObjectStoreExtractorPutJsonAttachment
from tests.unit.importio2.test_data import ObjectStoreExtractorPutUrlListAttachment
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG)
# Todo: Refactor standard location for test data
EXTRACTOR_GUID = 'a3fcec06-08b4-4b96-8fa8-a942f99cd1aa'
EXTRACTOR_URL_LIST_GUID = '12834ceb-76d2-4072-98bb-7e41a7c032ae'
EXTRACTOR_QUERY_URL = u'http://www.example.com/'
EXTRACTOR_NAME = 'API_TEST-example.com'
EXTRACTOR_RUNTIME_CONFIG = 'c7c4f78b-4b21-4d85-8e07-e0dbde9e517c'
API_TEST_GET_URL_LIST = '9dd8b560-70c1-43f1-902d-567ac2e2cf3f'
API_TEST_GET_URL_LIST_GUID = '0c5ee717-b9b9-4023-811d-e6ee5cf11ce9'
API_TEST_START_CANCEL = 'df761a66-c218-46ab-9655-01250e9c7214'
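# These tests exercise the live import.io API; they require IMPORT_IO_API_KEY
# in the environment and the fixture extractors referenced above to exist.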
class TestApiCore(TestCase):
def setUp(self):
self._api_key = os.environ['IMPORT_IO_API_KEY']
self._response = extractor_get(self._api_key, EXTRACTOR_GUID)
self._extractor = json.loads(self._response.text)
def test_extractor_get(self):
response = extractor_get(self._api_key, EXTRACTOR_GUID)
        self.assertEqual(response.status_code, requests.codes.ok)
    def test_extractor_id(self):
        self.assertEqual(self._extractor['guid'], EXTRACTOR_GUID)
    def test_extractor_name(self):
        self.assertEqual(self._extractor['name'], EXTRACTOR_NAME)
def test_extractor_url_list_get(self):
        response = extractor_url_list_get(self._api_key, EXTRACTOR_GUID, EXTRACTOR_URL_LIST_GUID)
        self.assertEqual(requests.codes.OK, response.status_code)
def test_extractor_url_list_get_long(self):
response = extractor_url_list_get(self._api_key, API_TEST_GET_URL_LIST, API_TEST_GET_URL_LIST_GUID)
self.assertEqual(requests.codes.OK, response.status_code)
content = """http://www.ikea.com/us/en/search/?query=chairs&pageNumber=1
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=2
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=3
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=4
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=5
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=6
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=7
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=8
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=9
http://www.ikea.com/us/en/search/?query=chairs&pageNumber=10"""
self.assertEqual(content, response.text)
def test_extractor_url_list_put(self):
response = extractor_url_list_put(
self._api_key, ExtractorUrlListPutTestData.EXTRACTOR_ID, ExtractorUrlListPutTestData.URL_LIST)
self.assertEqual(requests.codes.OK, response.status_code)
print(response.text)
def test_extractor_query(self):
response = extractor_query(self._api_key, EXTRACTOR_GUID, EXTRACTOR_QUERY_URL)
self.assertEqual(requests.codes.OK, response.status_code)
result = response.json()
extractor_data = result['extractorData']
data = extractor_data['data']
page_data = result['pageData']
self.assertEqual(-1, result['sequenceNumber'])
self.assertEqual(EXTRACTOR_QUERY_URL, result['url'])
self.assertEqual(requests.codes.OK, page_data['statusCode'])
self.assertEqual(EXTRACTOR_QUERY_URL, result['url'])
self.assertEqual(EXTRACTOR_RUNTIME_CONFIG, result['runtimeConfigId'])
def test_extractor_get_crawl_runs(self):
response = extractor_get_crawl_runs(self._api_key, ExtractorCrawlRunsTestData.EXTRACTOR_ID, 1, 30)
self.assertEqual(requests.codes.OK, response.status_code)
result = response.json()
hits = result['hits']['hits']
self.assertEqual(ExtractorCrawlRunsTestData.CRAWL_RUNS_LEN, len(hits))
crawl_run = hits[0]
crawl_run_fields = crawl_run['fields']
self.assertEqual(ExtractorCrawlRunsTestData.GUID, crawl_run_fields['guid'])
self.assertEqual(ExtractorCrawlRunsTestData.TYPE, crawl_run['_type'])
self.assertEqual(ExtractorCrawlRunsTestData.GUID, crawl_run['_id'])
self.assertEqual(ExtractorCrawlRunsTestData.EXTRACTOR_ID, crawl_run_fields['extractorId'])
self.assertEqual(ExtractorCrawlRunsTestData.STATE, crawl_run_fields['state'])
self.assertEqual(ExtractorCrawlRunsTestData.TOTAL_URL_COUNT, crawl_run_fields['totalUrlCount'])
self.assertEqual(ExtractorCrawlRunsTestData.SUCCESS_URL_COUNT, crawl_run_fields['successUrlCount'])
self.assertEqual(ExtractorCrawlRunsTestData.FAILED_URL_COUNT, crawl_run_fields['failedUrlCount'])
def test_extractor_cancel(self):
response = extractor_cancel(self._api_key, EXTRACTOR_GUID)
self.assertEqual(requests.codes.BAD_REQUEST, response.status_code)
def test_extractor_start(self):
response = extractor_start(self._api_key, EXTRACTOR_GUID)
self.assertEqual(requests.codes.OK, response.status_code)
extractor_cancel(self._api_key, EXTRACTOR_GUID)
def test_extractor_start_cancel(self):
response = extractor_start(self._api_key, API_TEST_START_CANCEL)
self.assertEqual(requests.codes.OK, response.status_code)
response = extractor_cancel(self._api_key, API_TEST_START_CANCEL)
self.assertEqual(requests.codes.OK, response.status_code)
def test_extractor_csv(self):
response = extractor_csv(self._api_key, ExtractorCSVTestData.EXTRACTOR_ID)
self.assertEqual(requests.codes.OK, response.status_code)
results = response.text.split('\n')
results = results[:-1]
self.assertEqual(ExtractorCSVTestData.CSV_LEN, len(results))
def test_extractor_json(self):
response = extractor_json(self._api_key, ExtractorJSONTestData.EXTRACTOR_ID)
self.assertEqual(requests.codes.OK, response.status_code)
results = response.text.split('\n')
self.assertEqual(ExtractorJSONTestData.JSON_LEN_RAW, len(results))
def test_extractor_log(self):
response = extractor_log(self._api_key, ExtractorLogTestData.EXTRACTOR_ID)
self.assertEqual(requests.codes.OK, response.status_code)
results = response.text.split('\n')
self.assertEqual(ExtractorLogTestData.LENGTH, len(results[:-1]))
class TestObjectStoreApiCore(TestCase):
def setUp(self):
self._api_key = os.environ['IMPORT_IO_API_KEY']
def test_create_crawl_run(self):
data = {
'extractorId': ObjectStoreCrawlRunTestData.EXTRACTOR_ID,
'failedUrlCount': ObjectStoreCrawlRunTestData.FAILED_URL_COUNT,
'successUrlCount': ObjectStoreCrawlRunTestData.SUCCESS_URL_COUNT,
'totalUrlCount': ObjectStoreCrawlRunTestData.TOTAL_URL_COUNT,
'rowCount': ObjectStoreCrawlRunTestData.ROW_COUNT,
'startedAt': ObjectStoreCrawlRunTestData.STARTED_AT,
'stoppedAt': ObjectStoreCrawlRunTestData.STOPPED_AT,
'state': ObjectStoreCrawlRunTestData.STATE
}
response = object_store_create(self._api_key, 'crawlrun', data)
result = response.json()
self.assertTrue('_meta' in result)
meta = result['_meta']
self.assertTrue('timestamp' in meta)
self.assertTrue('lastEditorGuid' in meta)
self.assertTrue('ownerGuid' in meta)
self.assertTrue('guid' in result)
self.assertTrue('successUrlCount' in result)
self.assertTrue('extractorId' in result)
self.assertTrue('stoppedAt' in result)
self.assertTrue('startedAt' in result)
self.assertTrue('failedUrlCount' in result)
self.assertTrue('totalUrlCount' in result)
self.assertTrue('state' in result)
self.assertTrue('rowCount' in result)
self.assertEqual(ObjectStoreCrawlRunTestData.EXTRACTOR_ID, result['extractorId'])
self.assertEqual(ObjectStoreCrawlRunTestData.STARTED_AT, result['startedAt'])
self.assertEqual(ObjectStoreCrawlRunTestData.STOPPED_AT, result['stoppedAt'])
self.assertEqual(ObjectStoreCrawlRunTestData.FAILED_URL_COUNT, result['failedUrlCount'])
self.assertEqual(ObjectStoreCrawlRunTestData.TOTAL_URL_COUNT, result['totalUrlCount'])
self.assertEqual(ObjectStoreCrawlRunTestData.SUCCESS_URL_COUNT, result['successUrlCount'])
self.assertEqual(ObjectStoreCrawlRunTestData.ROW_COUNT, result['rowCount'])
self.assertEqual(ObjectStoreCrawlRunTestData.STATE, result['state'])
def test_crawl_run_get(self):
response = object_store_get(self._api_key, 'crawlrun', CrawlRunGet.CRAWL_RUN_ID)
self.assertIsNotNone(response)
crawl_run = response.json()
self.assertEqual(CrawlRunGet.EXTRACTOR_ID, crawl_run['extractorId'])
def test_extractor_put_attachment(self):
response = object_store_put_attachment(self._api_key,
ObjectStoreExtractorPutUrlListAttachment.OBJECT_TYPE,
ObjectStoreExtractorPutUrlListAttachment.EXTRACTOR_ID,
ObjectStoreExtractorPutUrlListAttachment.ATTACHMENT_FIELD,
ObjectStoreExtractorPutUrlListAttachment.ATTACHMENT_CONTENTS,
ObjectStoreExtractorPutUrlListAttachment.ATTACHMENT_TYPE)
self.assertIsNotNone(response)
result = response.json()
self.assertTrue('guid' in result)
self.assertTrue('bucketGuid' in result)
self.assertTrue('objectGuid' in result)
self.assertEqual('urlList', result['field'])
self.assertEqual(66, int(result['size']))
def test_crawl_run_put_csv_attachment(self):
response = object_store_put_attachment(self._api_key,
ObjectStoreExtractorPutCsvAttachment.OBJECT_TYPE,
ObjectStoreExtractorPutCsvAttachment.CRAWL_RUN_ID,
ObjectStoreExtractorPutCsvAttachment.ATTACHMENT_FIELD,
ObjectStoreExtractorPutCsvAttachment.ATTACHMENT_CONTENTS,
ObjectStoreExtractorPutCsvAttachment.ATTACHMENT_TYPE)
self.assertIsNotNone(response)
result = response.json()
self.assertTrue('guid' in result)
self.assertTrue('bucketGuid' in result)
self.assertTrue('objectGuid' in result)
self.assertEqual('csv', result['field'])
self.assertEqual(66, int(result['size']))
def test_crawl_run_put_json_attachment(self):
response = object_store_put_attachment(self._api_key,
ObjectStoreExtractorPutJsonAttachment.OBJECT_TYPE,
ObjectStoreExtractorPutJsonAttachment.CRAWL_RUN_ID,
ObjectStoreExtractorPutJsonAttachment.ATTACHMENT_FIELD,
ObjectStoreExtractorPutJsonAttachment.ATTACHMENT_CONTENTS,
ObjectStoreExtractorPutJsonAttachment.ATTACHMENT_TYPE)
self.assertIsNotNone(response)
result = response.json()
self.assertIsNotNone(response)
self.assertTrue('guid' in result)
self.assertTrue('bucketGuid' in result)
self.assertTrue('objectGuid' in result)
self.assertEqual('json', result['field'])
def test_crawl_run_get_inputs_attachment(self):
pass
# response = object_store_get_attachment(self._api_key, )
def test_extractor_ownership_change(self):
response = object_store_change_ownership(api_key=self._api_key,
object_type='extractor',
object_id=ObjectStoreExtractorOwnership.EXTRACTOR_ID,
owner_id=ObjectStoreExtractorOwnership.NEW_OWNER_ID)
result = response.json()
print(result)
    def test_object_store_stream_attachment(self):
with tempfile.NamedTemporaryFile() as temp:
object_store_stream_attachment(api_key=self._api_key,
object_id=CrawlRunFilesDownloadTestData.CRAWL_RUN_ID,
object_type='crawlrun',
attachment_field='files',
attachment_id=CrawlRunFilesDownloadTestData.ATTACHMENT_ID,
attachment_type='application/zip',
path=temp.name)
stats = os.stat(temp.name)
self.assertEqual(325672, stats.st_size)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Brief Summary
Attributes:
Google Python Style Guide:
http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
"""
__copyright__ = "Zhaoyu Luo"
import argparse
import random
import simpy
from simpy.events import AllOf
import node
class HDFS(node.BaseSim):
"""By default, HDFS owns one switch and one client machine, it would instantiate that automatically
The client machine would only be used to submit its task
"""
def __init__(self, env, namenode, replica_number=3, heartbeat_interval=3, heartbeat_size=1024,
enable_datanode_cache=True, enable_heartbeats=True, enable_block_report=True,
block_report_interval=30, balance_bandwidth=1024*1024, client_write_packet_size=1024*1024,
**kwargs):
super(HDFS, self).__init__(**kwargs)
self.env = env
self.id = "HDFS"
self.client_write_packet_size = client_write_packet_size
self.block_size = 64 * 1024 * 1024
self.replica_number = replica_number
self.enable_datanode_cache = enable_datanode_cache
self.enable_heartbeats = enable_heartbeats
self.enable_block_report = enable_block_report
self.heartbeat_size = heartbeat_size
self.heartbeat_interval = heartbeat_interval
self.block_report_interval = block_report_interval
#: dfs.datanode.balance.bandwidthPerSec
self.balance_bandwidth = balance_bandwidth
self.switch = node.Switch(env, **kwargs)
self.client = node.Node(env, "client", **kwargs)
self.switch.add_node(self.client)
self.datanodes = {}
if namenode:
self.set_namenode(namenode)
def start_services(self):
if self.enable_heartbeats:
self.start_hdfs_heartbeat()
if self.enable_block_report:
self.start_block_report()
def run_forever(self):
self.start_services()
self.env.run()
def run_until(self, until):
self.start_services()
self.env.run(until)
def start_block_report(self):
if len(self.datanodes) < 1 or not self.namenode:
self.critical("fail to start block report: no datanode exists")
return
for node_name in self.datanodes:
self.datanodes[node_name].start_block_report(self.block_report_interval)
self.critical("start HDFS block report")
def start_hdfs_heartbeat(self):
if len(self.datanodes) < 1 or not self.namenode:
self.critical("fail to start HDFS heartbeat: no datanode exists")
return
for node_name in self.datanodes:
self.switch.start_heartbeat(node_name, self.namenode.id, self.heartbeat_size, self.heartbeat_interval)
self.critical("start HDFS heartbeat")
def set_namenode(self, node):
self.namenode = node
self.switch.add_node(node)
self.datanodes = self.namenode.datanodes
def create_datanode(self, node_id, **kwargs):
datanode = node.DataNode(self.env, node_id, hdfs=self,
do_debug=self.do_debug, do_info=self.do_info, do_warning=self.do_warning, do_critical=self.do_critical,
**kwargs)
self.add_datanode(datanode)
def add_datanode(self, node):
self.datanodes[node.id] = node
self.switch.add_node(node)
def transfer_data(self, from_node_id, to_node_id, size, throttle_bandwidth=-1):
return self.env.process(self._transfer_data(from_node_id, to_node_id, size, throttle_bandwidth))
def _transfer_data(self, from_node_id, to_node_id, size, throttle_bandwidth=-1):
yield self.switch.process_ping(from_node_id, to_node_id, size, throttle_bandwidth)
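        # When the datanode cache is enabled, writes land in the buffer cache
        # first; otherwise they go straight to a disk write request.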
if self.enable_datanode_cache:
yield self.datanodes[to_node_id].new_disk_buffer_write_request(size)
else:
yield self.datanodes[to_node_id].new_disk_write_request(size)
def replicate_file(self, file_name, size, node_sequence, throttle_bandwidth=-1):
return self.env.process(self._replicate_file(file_name, size, node_sequence, throttle_bandwidth))
def _replicate_file(self, file_name, size, node_sequence, throttle_bandwidth=-1):
self.info("REPLICATING\t%s\tin %s" % (file_name, node_sequence))
i = 0
while i < len(node_sequence) - 1:
yield self.transfer_data(node_sequence[i], node_sequence[i+1], size, throttle_bandwidth)
i += 1
self.info("REPLICATED\t%s\tin %s" % (file_name, node_sequence))
def create_file(self, file_name, size, node_sequence, throttle_bandwidth=-1):
return self.env.process(self._create_file(file_name, size, node_sequence, throttle_bandwidth))
def _create_file(self, file_name, size, node_sequence, throttle_bandwidth=-1):
"""big file would be splitted into packtes <= 64KB"""
# pipeline writing (divide into packets) to all datanodes
sent_file_size = 0
pipeline_events = []
i = 1
while sent_file_size < size:
sending_size = min(self.client_write_packet_size, size - sent_file_size)
p = self.replicate_file("%s.%i" % (file_name, i), sending_size, node_sequence, throttle_bandwidth)
pipeline_events.append(p)
sent_file_size += sending_size
i += 1
# wait for all ACKs
yield AllOf(self.env, pipeline_events)
if self.client.id in node_sequence:
node_sequence.remove(self.client.id)
self.namenode.register_file(file_name, node_sequence)
self.critical("ALL ACKs collected, put_file %s finished" % (file_name))
def put_files(self, num, size, throttle_bandwidth=-1):
"""This API is used by client"""
events = []
for i in range(num):
file_name = "hello.%i.txt" % i
datanode_names = self.namenode.find_datanodes_for_new_file(file_name, size, self.replica_number)
datanode_names.insert(0, self.client.id)
e = self.create_file(file_name, size, datanode_names, throttle_bandwidth)
events.append(e)
run_all = AllOf(self.env, events)
self.run_until(run_all)
self.critical("%i files stored in NameNode" % len(self.namenode.metadata))
return self.env.now
def regenerate_blocks(self, num):
"""TODO: it is justly randomly regenerate blocks, not according to block placement and its replica number"""
regenerate_events = []
for i in range(num):
from_node_id, to_node_id = random.sample(self.namenode.datanodes.keys(), 2)
self.info("regenerating block %s->%s" % (from_node_id, to_node_id))
r = self.create_file("block.%s.dat" % i, self.block_size, [from_node_id, to_node_id], self.balance_bandwidth)
regenerate_events.append(r)
run_all = AllOf(self.env, regenerate_events)
self.run_until(run_all)
return self.env.now
def limplock_create_30_files(self):
"""create 30 64-MB files"""
self.put_files(30, self.block_size)
return self.env.now
def limplock_regenerate_90_blocks(self):
"""regenerate 90 blocks"""
self.info("regenerating 90 blocks: throttle_bandwidth: %s" % self.balance_bandwidth)
self.regenerate_blocks(90)
return self.env.now
def create_hdfs(env=None, number_of_datanodes=3, replica_number=3,
enable_block_report=True, enable_heartbeats=True, enable_datanode_cache=True,
default_bandwidth=100*1024*1024/8, default_disk_speed=80*1024*1024, heartbeat_interval=3,
heartbeat_size=16*1024, block_report_interval=30, client_write_packet_size=1024*1024, **kwargs):
if not env:
env = simpy.Environment()
hdfs = HDFS(env, namenode=None, replica_number=replica_number,
enable_block_report=enable_block_report, enable_heartbeats=enable_heartbeats,
heartbeat_interval=heartbeat_interval, heartbeat_size=heartbeat_size,
block_report_interval=block_report_interval, client_write_packet_size=client_write_packet_size, **kwargs)
namenode = node.NameNode(env, "namenode", hdfs, **kwargs)
hdfs.set_namenode(namenode)
for i in range(number_of_datanodes):
hdfs.create_datanode("datanode%i" % i, disk_speed=default_disk_speed, default_bandwidth=default_bandwidth)
return hdfs
def create_silent_hdfs(**kwargs):
return create_hdfs(do_debug=False, do_info=False, do_warning=False, do_critical=False, **kwargs)
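# Minimal usage sketch (assuming the node module's defaults): build a silent
# five-node cluster, write ten 64 MB files, and read the simulated clock.
#
#     hdfs = create_silent_hdfs(number_of_datanodes=5)
#     finish_time = hdfs.put_files(10, 64 * 1024 * 1024)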
def main():
"""Main function only in command line"""
    parser = argparse.ArgumentParser(description='Simulate HDFS writes on a configurable cluster.')
parser.add_argument('--disk-speed', type=int, default=80*1024*1024, help='disk speed')
parser.add_argument('--nodes', type=int, default=20, help='number of datanodes')
parser.add_argument('--files', type=int, default=30, help='number of generate files')
args = parser.parse_args()
print(args)
hdfs = create_hdfs(number_of_datanodes=args.nodes, default_disk_speed=args.disk_speed,
do_debug=True,
)
if True:
hdfs.put_files(args.files, 64*1024*1024)
else:
hdfs.regenerate_blocks(args.files)
if __name__ == '__main__':
main()
|
|
import code
import itertools
import keyword
import os
import sys
import unicodedata
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.widgets.utils import itemmodels
from PyQt4 import QtGui
from PyQt4.QtCore import Qt, QRegExp, QByteArray
from PyQt4.QtGui import (
QTextCursor, QFont, QColor, QPalette, QListView, QSizePolicy, QAction,
QMenu, QKeySequence, QSplitter, QToolButton, QItemSelectionModel,
QFileDialog
)
__all__ = ["OWPythonScript"]
def text_format(foreground = Qt.black, weight = QFont.Normal):
fmt = QtGui.QTextCharFormat()
fmt.setForeground(QtGui.QBrush(foreground))
fmt.setFontWeight(weight)
return fmt
class PythonSyntaxHighlighter(QtGui.QSyntaxHighlighter):
def __init__(self, parent = None):
self.keywordFormat = text_format(Qt.blue, QFont.Bold)
self.stringFormat = text_format(Qt.darkGreen)
self.defFormat = text_format(Qt.black, QFont.Bold)
self.commentFormat = text_format(Qt.lightGray)
self.decoratorFormat = text_format(Qt.darkGray)
self.keywords = list(keyword.kwlist)
self.rules = [(QRegExp(r"\b%s\b" % kwd), self.keywordFormat)
for kwd in self.keywords] + \
[(QRegExp(r"\bdef\s+([A-Za-z_]+[A-Za-z0-9_]+)\s*\("),
self.defFormat),
(QRegExp(r"\bclass\s+([A-Za-z_]+[A-Za-z0-9_]+)\s*\("),
self.defFormat),
(QRegExp(r"'.*'"), self.stringFormat),
(QRegExp(r'".*"'), self.stringFormat),
(QRegExp(r"#.*"), self.commentFormat),
(QRegExp(r"@[A-Za-z_]+[A-Za-z0-9_]+"),
self.decoratorFormat)]
self.multilineStart = QRegExp(r"(''')|" + r'(""")')
self.multilineEnd = QRegExp(r"(''')|" + r'(""")')
super().__init__(parent)
def highlightBlock(self, text):
for pattern, format in self.rules:
exp = QRegExp(pattern)
index = exp.indexIn(text)
while index >= 0:
length = exp.matchedLength()
if exp.numCaptures() > 0:
self.setFormat(exp.pos(1), len(str(exp.cap(1))), format)
else:
self.setFormat(exp.pos(0), len(str(exp.cap(0))), format)
index = exp.indexIn(text, index + length)
# Multi line strings
start = self.multilineStart
end = self.multilineEnd
self.setCurrentBlockState(0)
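        # Block state 1 means "still inside a triple-quoted string"; when the
        # previous block ended in one, resume highlighting from column 0.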
startIndex, skip = 0, 0
if self.previousBlockState() != 1:
startIndex, skip = start.indexIn(text), 3
while startIndex >= 0:
endIndex = end.indexIn(text, startIndex + skip)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLen = len(text) - startIndex
else:
commentLen = endIndex - startIndex + 3
self.setFormat(startIndex, commentLen, self.stringFormat)
startIndex, skip = (start.indexIn(text,
startIndex + commentLen + 3),
3)
class PythonScriptEditor(QtGui.QPlainTextEdit):
INDENT = 4
def lastLine(self):
text = str(self.toPlainText())
pos = self.textCursor().position()
index = text.rfind("\n", 0, pos)
text = text[index: pos].lstrip("\n")
return text
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return:
text = self.lastLine()
indent = len(text) - len(text.lstrip())
if text.strip() == "pass" or text.strip().startswith("return "):
indent = max(0, indent - self.INDENT)
elif text.strip().endswith(":"):
indent += self.INDENT
super().keyPressEvent(event)
self.insertPlainText(" " * indent)
elif event.key() == Qt.Key_Tab:
self.insertPlainText(" " * self.INDENT)
elif event.key() == Qt.Key_Backspace:
text = self.lastLine()
if text and not text.strip():
cursor = self.textCursor()
for i in range(min(self.INDENT, len(text))):
cursor.deletePreviousChar()
else:
super().keyPressEvent(event)
else:
super().keyPressEvent(event)
class PySparkConsole(QtGui.QPlainTextEdit, code.InteractiveConsole):
def __init__(self, locals = None, parent = None, sc = None):
QtGui.QPlainTextEdit.__init__(self, parent)
code.InteractiveConsole.__init__(self, locals)
self.sc = sc
self.history, self.historyInd = [""], 0
self.loop = self.interact()
next(self.loop)
def setLocals(self, locals):
self.locals = locals
def interact(self, banner = None):
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = ('Type "help", "copyright", "credits" or "license" '
'for more information.')
spark_logo = """
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version {version}
/_/
""".format(version = self.sc.version)
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
self.write(spark_logo+"\n")
self.write("SparkContext available as sc, HiveContext available as hc.\n")
else:
self.write("%s\n" % str(banner))
more = 0
        while True:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
self.new_prompt(prompt)
yield
try:
line = self.raw_input(prompt)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
    def raw_input(self, prompt):
        line = str(self.document().lastBlock().previous().text())
        return line[len(prompt):]
def new_prompt(self, prompt):
self.write(prompt)
self.newPromptPos = self.textCursor().position()
def write(self, data):
cursor = QTextCursor(self.document())
cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
cursor.insertText(data)
self.setTextCursor(cursor)
self.ensureCursorVisible()
def writelines(self, lines):
for line in lines:
self.write(line)
def push(self, line):
if self.history[0] != line:
self.history.insert(0, line)
self.historyInd = 0
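        # Redirect stdout/stderr into this widget while the interpreter runs
        # the line, so any output is rendered in the console.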
saved = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = self, self
return code.InteractiveConsole.push(self, line)
finally:
sys.stdout, sys.stderr = saved
def setLine(self, line):
cursor = QTextCursor(self.document())
cursor.movePosition(QTextCursor.End)
cursor.setPosition(self.newPromptPos, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
cursor.insertText(line)
self.setTextCursor(cursor)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return:
self.write("\n")
next(self.loop)
elif event.key() == Qt.Key_Up:
self.historyUp()
elif event.key() == Qt.Key_Down:
self.historyDown()
elif event.key() == Qt.Key_Tab:
self.complete()
elif event.key() in [Qt.Key_Left, Qt.Key_Backspace]:
if self.textCursor().position() > self.newPromptPos:
QtGui.QPlainTextEdit.keyPressEvent(self, event)
else:
QtGui.QPlainTextEdit.keyPressEvent(self, event)
def historyUp(self):
self.setLine(self.history[self.historyInd])
self.historyInd = min(self.historyInd + 1, len(self.history) - 1)
def historyDown(self):
self.setLine(self.history[self.historyInd])
self.historyInd = max(self.historyInd - 1, 0)
def complete(self):
pass
def _moveCursorToInputLine(self):
"""
        Move the cursor to the input line if not already there. If the cursor
        is already in the input line (at a position greater than or equal to
        `newPromptPos`) it is left unchanged; otherwise it is moved to the
        end.
"""
cursor = self.textCursor()
pos = cursor.position()
if pos < self.newPromptPos:
cursor.movePosition(QTextCursor.End)
self.setTextCursor(cursor)
def pasteCode(self, source):
"""
Paste source code into the console.
"""
self._moveCursorToInputLine()
for line in interleave(source.splitlines(), itertools.repeat("\n")):
if line != "\n":
self.insertPlainText(line)
else:
self.write("\n")
next(self.loop)
def insertFromMimeData(self, source):
"""
Reimplemented from QPlainTextEdit.insertFromMimeData.
"""
if source.hasText():
self.pasteCode(str(source.text()))
return
def interleave(seq1, seq2):
"""
Interleave elements of `seq2` between consecutive elements of `seq1`.
>>> list(interleave([1, 3, 5], [2, 4]))
[1, 2, 3, 4, 5]
"""
iterator1, iterator2 = iter(seq1), iter(seq2)
leading = next(iterator1)
for element in iterator1:
yield leading
yield next(iterator2)
leading = element
yield leading
class Script(object):
Modified = 1
MissingFromFilesystem = 2
def __init__(self, name, script, flags = 0, filename = None):
self.name = name
self.script = script
self.flags = flags
self.filename = filename
class ScriptItemDelegate(QtGui.QStyledItemDelegate):
def __init__(self, parent):
super().__init__(parent)
def displayText(self, script, locale):
if script.flags & Script.Modified:
return "*" + script.name
else:
return script.name
def paint(self, painter, option, index):
script = index.data(Qt.DisplayRole)
if script.flags & Script.Modified:
option = QtGui.QStyleOptionViewItemV4(option)
option.palette.setColor(QPalette.Text, QColor(Qt.red))
option.palette.setColor(QPalette.Highlight, QColor(Qt.darkRed))
super().paint(painter, option, index)
def createEditor(self, parent, option, index):
return QtGui.QLineEdit(parent)
def setEditorData(self, editor, index):
script = index.data(Qt.DisplayRole)
editor.setText(script.name)
def setModelData(self, editor, model, index):
model[index.row()].name = str(editor.text())
def select_row(view, row):
"""
Select a `row` in an item view
"""
selmodel = view.selectionModel()
selmodel.select(view.model().index(row, 0),
QItemSelectionModel.ClearAndSelect)
from orangecontrib.spark.base.shared_spark_context import SharedSparkContext
class OWPySparkScript(SharedSparkContext, widget.OWWidget):
priority = 3
name = "PySpark Script"
description = "Write a PySpark script and run it on input"
icon = "../icons/PythonScript.svg"
inputs = [("in_object", object, "setObject")]
outputs = [("out_object", object, widget.Dynamic)]
libraryListSource = \
Setting([Script("Hello world", "print('Hello world')\n")])
currentScriptIndex = Setting(0)
splitterState = Setting(None)
auto_execute = Setting(False)
def __init__(self):
super().__init__()
self.in_data = None
self.in_distance = None
self.in_learner = None
self.in_classifier = None
self.in_object = None
self.auto_execute = False
for s in self.libraryListSource:
s.flags = 0
self._cachedDocuments = { }
self.infoBox = gui.widgetBox(self.controlArea, 'Info')
gui.label(
self.infoBox, self,
"<p>Execute python script.</p><p>Input variables:<ul><li> " + \
"<li>".join(t.name for t in self.inputs) + \
"</ul></p><p>Output variables:<ul><li>" + \
"<li>".join(t.name for t in self.outputs) + \
"</ul></p>"
)
self.libraryList = itemmodels.PyListModel(
[], self,
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable)
self.libraryList.wrap(self.libraryListSource)
self.controlBox = gui.widgetBox(self.controlArea, 'Library')
self.controlBox.layout().setSpacing(1)
self.libraryView = QListView(
editTriggers = QListView.DoubleClicked |
QListView.EditKeyPressed,
sizePolicy = QSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Preferred)
)
self.libraryView.setItemDelegate(ScriptItemDelegate(self))
self.libraryView.setModel(self.libraryList)
self.libraryView.selectionModel().selectionChanged.connect(
self.onSelectedScriptChanged
)
self.controlBox.layout().addWidget(self.libraryView)
w = itemmodels.ModelActionsWidget()
self.addNewScriptAction = action = QAction("+", self)
action.setToolTip("Add a new script to the library")
action.triggered.connect(self.onAddScript)
w.addAction(action)
action = QAction(unicodedata.lookup("MINUS SIGN"), self)
action.setToolTip("Remove script from library")
action.triggered.connect(self.onRemoveScript)
w.addAction(action)
action = QAction("Update", self)
action.setToolTip("Save changes in the editor to library")
action.setShortcut(QKeySequence(QKeySequence.Save))
action.triggered.connect(self.commitChangesToLibrary)
w.addAction(action)
action = QAction("More", self, toolTip = "More actions")
new_from_file = QAction("Import a script from a file", self)
save_to_file = QAction("Save selected script to a file", self)
save_to_file.setShortcut(QKeySequence(QKeySequence.SaveAs))
new_from_file.triggered.connect(self.onAddScriptFromFile)
save_to_file.triggered.connect(self.saveScript)
menu = QMenu(w)
menu.addAction(new_from_file)
menu.addAction(save_to_file)
action.setMenu(menu)
button = w.addAction(action)
button.setPopupMode(QToolButton.InstantPopup)
w.layout().setSpacing(1)
self.controlBox.layout().addWidget(w)
gui.auto_commit(self.controlArea, self, "auto_execute", "Execute")
self.splitCanvas = QSplitter(Qt.Vertical, self.mainArea)
self.mainArea.layout().addWidget(self.splitCanvas)
self.defaultFont = defaultFont = \
"Monaco" if sys.platform == "darwin" else "Courier"
self.textBox = gui.widgetBox(self, 'Python script')
self.splitCanvas.addWidget(self.textBox)
self.text = PythonScriptEditor(self)
self.textBox.layout().addWidget(self.text)
self.textBox.setAlignment(Qt.AlignVCenter)
self.text.setTabStopWidth(4)
self.text.modificationChanged[bool].connect(self.onModificationChanged)
self.saveAction = action = QAction("&Save", self.text)
action.setToolTip("Save script to file")
action.setShortcut(QKeySequence(QKeySequence.Save))
action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
action.triggered.connect(self.saveScript)
self.consoleBox = gui.widgetBox(self, 'Console')
self.splitCanvas.addWidget(self.consoleBox)
self.__dict__['sc'] = self._sc
self.__dict__['hc'] = self._hc
self.console = PySparkConsole(self.__dict__, self, sc = self.sc)
self.consoleBox.layout().addWidget(self.console)
self.console.document().setDefaultFont(QFont(defaultFont))
self.consoleBox.setAlignment(Qt.AlignBottom)
self.console.setTabStopWidth(4)
select_row(self.libraryView, self.currentScriptIndex)
self.splitCanvas.setSizes([2, 1])
if self.splitterState is not None:
self.splitCanvas.restoreState(QByteArray(self.splitterState))
        self.splitCanvas.splitterMoved[int, int].connect(self.onSplitterMoved)
self.controlArea.layout().addStretch(1)
self.resize(800, 600)
def setExampleTable(self, et):
self.in_data = et
def setDistanceMatrix(self, dm):
self.in_distance = dm
def setLearner(self, learner):
self.in_learner = learner
def setClassifier(self, classifier):
self.in_classifier = classifier
def setObject(self, obj):
self.in_object = obj
def handleNewSignals(self):
self.unconditional_commit()
def selectedScriptIndex(self):
rows = self.libraryView.selectionModel().selectedRows()
if rows:
return [i.row() for i in rows][0]
else:
return None
def setSelectedScript(self, index):
select_row(self.libraryView, index)
def onAddScript(self, *args):
self.libraryList.append(Script("New script", "", 0))
self.setSelectedScript(len(self.libraryList) - 1)
def onAddScriptFromFile(self, *args):
filename = QFileDialog.getOpenFileName(
self, 'Open Python Script',
os.path.expanduser("~/"),
'Python files (*.py)\nAll files(*.*)'
)
filename = str(filename)
if filename:
name = os.path.basename(filename)
            with open(filename, "rb") as f:
                contents = f.read().decode("utf-8", errors = "ignore")
self.libraryList.append(Script(name, contents, 0, filename))
self.setSelectedScript(len(self.libraryList) - 1)
def onRemoveScript(self, *args):
index = self.selectedScriptIndex()
if index is not None:
del self.libraryList[index]
select_row(self.libraryView, max(index - 1, 0))
def onSaveScriptToFile(self, *args):
index = self.selectedScriptIndex()
if index is not None:
self.saveScript()
def onSelectedScriptChanged(self, selected, deselected):
index = [i.row() for i in selected.indexes()]
if index:
current = index[0]
if current >= len(self.libraryList):
self.addNewScriptAction.trigger()
return
self.text.setDocument(self.documentForScript(current))
self.currentScriptIndex = current
def documentForScript(self, script = 0):
        if not isinstance(script, Script):
script = self.libraryList[script]
if script not in self._cachedDocuments:
doc = QtGui.QTextDocument(self)
doc.setDocumentLayout(QtGui.QPlainTextDocumentLayout(doc))
doc.setPlainText(script.script)
doc.setDefaultFont(QFont(self.defaultFont))
doc.highlighter = PythonSyntaxHighlighter(doc)
doc.modificationChanged[bool].connect(self.onModificationChanged)
doc.setModified(False)
self._cachedDocuments[script] = doc
return self._cachedDocuments[script]
def commitChangesToLibrary(self, *args):
index = self.selectedScriptIndex()
if index is not None:
self.libraryList[index].script = self.text.toPlainText()
self.text.document().setModified(False)
self.libraryList.emitDataChanged(index)
def onModificationChanged(self, modified):
index = self.selectedScriptIndex()
if index is not None:
self.libraryList[index].flags = Script.Modified if modified else 0
self.libraryList.emitDataChanged(index)
    def onSplitterMoved(self, pos, ind):
self.splitterState = str(self.splitCanvas.saveState())
    def updateSelectedScriptState(self):
index = self.selectedScriptIndex()
if index is not None:
script = self.libraryList[index]
self.libraryList[index] = Script(script.name,
self.text.toPlainText(),
0)
def saveScript(self):
index = self.selectedScriptIndex()
if index is not None:
script = self.libraryList[index]
filename = script.filename
else:
filename = os.path.expanduser("~/")
filename = QFileDialog.getSaveFileName(
self, 'Save Python Script',
filename,
'Python files (*.py)\nAll files(*.*)'
)
if filename:
fn = ""
head, tail = os.path.splitext(filename)
if not tail:
fn = head + ".py"
else:
fn = filename
            with open(fn, 'w') as f:
                f.write(self.text.toPlainText())
def commit(self):
self._script = str(self.text.toPlainText())
self.console.write("\nRunning script:\n")
self.console.push("exec(_script)")
self.console.new_prompt(sys.ps1)
for out in self.outputs:
signal = out.name
self.send(signal, getattr(self, signal, None))
|
|
#!/usr/bin/env python
"""Implementation of condition mechanism for client-side file-finder."""
import abc
import re
from typing import Iterator
from typing import NamedTuple
from typing import Optional
from typing import Pattern
from grr_response_client import streaming
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.util import precondition
class MetadataCondition(metaclass=abc.ABCMeta):
"""An abstract class representing conditions on the file metadata."""
@abc.abstractmethod
def Check(self, stat):
"""Checks whether condition is met.
Args:
stat: An `util.filesystem.Stat` object.
Returns:
True if the condition is met.
"""
pass
@staticmethod
def Parse(conditions):
"""Parses the file finder condition types into the condition objects.
Args:
conditions: An iterator over `FileFinderCondition` objects.
Yields:
`MetadataCondition` objects that correspond to the file-finder conditions.
"""
kind = rdf_file_finder.FileFinderCondition.Type
classes = {
kind.MODIFICATION_TIME: ModificationTimeCondition,
kind.ACCESS_TIME: AccessTimeCondition,
kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,
kind.SIZE: SizeCondition,
kind.EXT_FLAGS: ExtFlagsCondition,
}
for condition in conditions:
try:
yield classes[condition.condition_type](condition)
except KeyError:
pass
class ModificationTimeCondition(MetadataCondition):
"""A condition checking modification time of a file."""
def __init__(self, params):
super().__init__()
self.params = params.modification_time
def Check(self, stat):
min_mtime = self.params.min_last_modified_time.AsMicrosecondsSinceEpoch()
max_mtime = self.params.max_last_modified_time.AsMicrosecondsSinceEpoch()
return min_mtime <= stat.GetModificationTime() <= max_mtime
class AccessTimeCondition(MetadataCondition):
"""A condition checking access time of a file."""
def __init__(self, params):
super().__init__()
self.params = params.access_time
def Check(self, stat):
min_atime = self.params.min_last_access_time.AsMicrosecondsSinceEpoch()
max_atime = self.params.max_last_access_time.AsMicrosecondsSinceEpoch()
return min_atime <= stat.GetAccessTime() <= max_atime
class InodeChangeTimeCondition(MetadataCondition):
"""A condition checking change time of inode of a file."""
def __init__(self, params):
super().__init__()
self.params = params.inode_change_time
def Check(self, stat):
params = self.params
min_ctime = params.min_last_inode_change_time.AsMicrosecondsSinceEpoch()
max_ctime = params.max_last_inode_change_time.AsMicrosecondsSinceEpoch()
return min_ctime <= stat.GetChangeTime() <= max_ctime
class SizeCondition(MetadataCondition):
"""A condition checking size of a file."""
def __init__(self, params):
super().__init__()
self.params = params.size
def Check(self, stat):
min_fsize = self.params.min_file_size
max_fsize = self.params.max_file_size
return min_fsize <= stat.GetSize() <= max_fsize
class ExtFlagsCondition(MetadataCondition):
"""A condition checking extended flags of a file.
Args:
params: A `FileFinderCondition` instance.
"""
def __init__(self, params):
super().__init__()
self.params = params.ext_flags
def Check(self, stat):
return self.CheckOsx(stat) and self.CheckLinux(stat)
def CheckLinux(self, stat):
flags = stat.GetLinuxFlags()
bits_set = self.params.linux_bits_set
bits_unset = self.params.linux_bits_unset
return (bits_set & flags) == bits_set and (bits_unset & flags) == 0
def CheckOsx(self, stat):
flags = stat.GetOsxFlags()
bits_set = self.params.osx_bits_set
bits_unset = self.params.osx_bits_unset
return (bits_set & flags) == bits_set and (bits_unset & flags) == 0
class ContentCondition(metaclass=abc.ABCMeta):
"""An abstract class representing conditions on the file contents."""
@abc.abstractmethod
def Search(self, fd):
"""Searches specified file for particular content.
Args:
fd: A file descriptor of the file that needs to be searched.
Yields:
`BufferReference` objects pointing to file parts with matching content.
"""
pass
@staticmethod
def Parse(conditions):
"""Parses the file finder condition types into the condition objects.
Args:
conditions: An iterator over `FileFinderCondition` objects.
Yields:
`ContentCondition` objects that correspond to the file-finder conditions.
"""
kind = rdf_file_finder.FileFinderCondition.Type
classes = {
kind.CONTENTS_LITERAL_MATCH: LiteralMatchCondition,
kind.CONTENTS_REGEX_MATCH: RegexMatchCondition,
}
for condition in conditions:
try:
yield classes[condition.condition_type](condition)
except KeyError:
pass
OVERLAP_SIZE = 1024 * 1024
CHUNK_SIZE = 10 * 1024 * 1024
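  # Consecutive chunks overlap by OVERLAP_SIZE bytes so that matches spanning
  # a chunk boundary are still found.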
def Scan(self, fd,
matcher: "Matcher") -> Iterator[rdf_client.BufferReference]:
"""Scans given file searching for occurrences of given pattern.
Args:
fd: A file descriptor of the file that needs to be searched.
matcher: A matcher object specifying a pattern to search for.
Yields:
`BufferReference` objects pointing to file parts with matching content.
"""
streamer = streaming.Streamer(
chunk_size=self.CHUNK_SIZE, overlap_size=self.OVERLAP_SIZE)
offset = self.params.start_offset
amount = self.params.length
for chunk in streamer.StreamFile(fd, offset=offset, amount=amount):
for span in chunk.Scan(matcher):
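        # Widen the raw match by the requested bytes_before/bytes_after of
        # context, clamped to this chunk's data.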
ctx_begin = max(span.begin - self.params.bytes_before, 0)
ctx_end = min(span.end + self.params.bytes_after, len(chunk.data))
ctx_data = chunk.data[ctx_begin:ctx_end]
yield rdf_client.BufferReference(
offset=chunk.offset + ctx_begin,
length=len(ctx_data),
data=ctx_data)
if self.params.mode == self.params.Mode.FIRST_HIT:
return
class LiteralMatchCondition(ContentCondition):
"""A content condition that lookups a literal pattern."""
def __init__(self, params):
super().__init__()
self.params = params.contents_literal_match
def Search(self, fd):
matcher = LiteralMatcher(self.params.literal.AsBytes())
for match in self.Scan(fd, matcher):
yield match
class RegexMatchCondition(ContentCondition):
"""A content condition that lookups regular expressions."""
def __init__(self, params):
super().__init__()
self.params = params.contents_regex_match
def Search(self, fd) -> Iterator[rdf_client.BufferReference]:
regex = re.compile(self.params.regex.AsBytes(), flags=re.I | re.S | re.M)
matcher = RegexMatcher(regex)
for match in self.Scan(fd, matcher):
yield match
class Matcher(metaclass=abc.ABCMeta):
"""An abstract class for objects able to lookup byte strings."""
Span = NamedTuple("Span", [("begin", int), ("end", int)]) # pylint: disable=invalid-name
@abc.abstractmethod
def Match(self, data: bytes, position: int) -> Optional["Matcher.Span"]:
"""Matches the given data object starting at specified position.
Args:
data: A byte string to pattern match on.
position: First position at which the search is started on.
Returns:
A `Span` object if the matcher finds something in the data.
"""
pass
class RegexMatcher(Matcher):
"""A regex wrapper that conforms to the `Matcher` interface.
Args:
regex: An RDF regular expression that the matcher represents.
"""
def __init__(self, regex: Pattern[bytes]):
precondition.AssertType(regex, Pattern)
super().__init__()
self._regex = regex
def Match(self, data: bytes, position: int) -> Optional[Matcher.Span]:
precondition.AssertType(data, bytes)
precondition.AssertType(position, int)
match = self._regex.search(data[position:])
if not match:
return None
begin, end = match.span()
return Matcher.Span(begin=position + begin, end=position + end)
class LiteralMatcher(Matcher):
"""An exact string matcher that conforms to the `Matcher` interface.
Args:
literal: A byte string pattern that the matcher matches.
"""
def __init__(self, literal: bytes):
precondition.AssertType(literal, bytes)
super().__init__()
self._literal = literal
def Match(self, data: bytes, position: int) -> Optional[Matcher.Span]:
precondition.AssertType(data, bytes)
precondition.AssertType(position, int)
offset = data.find(self._literal, position)
if offset == -1:
return None
return Matcher.Span(begin=offset, end=offset + len(self._literal))
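# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of the `Matcher` contract defined above: `Match` returns a
# `Span` for the first occurrence at or after `position`, or `None` otherwise.
def _example_matcher_usage():
    matcher = LiteralMatcher(b"needle")
    span = matcher.Match(b"hay needle hay", 0)
    # For the input above: span.begin == 4, span.end == 10.
    return span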
|
|
"""
Support for interacting with Snapcast clients.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.snapcast/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
DOMAIN, SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_PORT, STATE_IDLE, STATE_OFF, STATE_ON,
STATE_PLAYING, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['snapcast==2.0.9']
_LOGGER = logging.getLogger(__name__)
DATA_KEY = 'snapcast'
SERVICE_SNAPSHOT = 'snapcast_snapshot'
SERVICE_RESTORE = 'snapcast_restore'
SUPPORT_SNAPCAST_CLIENT = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET
SUPPORT_SNAPCAST_GROUP = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET |\
SUPPORT_SELECT_SOURCE
GROUP_PREFIX = 'snapcast_group_'
GROUP_SUFFIX = 'Snapcast Group'
CLIENT_PREFIX = 'snapcast_client_'
CLIENT_SUFFIX = 'Snapcast Client'
SERVICE_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
})
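# Example configuration.yaml entry (illustrative; the host and port values
# below are assumptions, not shipped defaults):
#
# media_player:
#   - platform: snapcast
#     host: 192.168.1.50
#     port: 1705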
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Snapcast platform."""
import snapcast.control
from snapcast.control.server import CONTROL_PORT
host = config.get(CONF_HOST)
port = config.get(CONF_PORT, CONTROL_PORT)
async def _handle_service(service):
"""Handle services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
devices = [device for device in hass.data[DATA_KEY]
if device.entity_id in entity_ids]
for device in devices:
if service.service == SERVICE_SNAPSHOT:
device.snapshot()
elif service.service == SERVICE_RESTORE:
await device.async_restore()
hass.services.async_register(
DOMAIN, SERVICE_SNAPSHOT, _handle_service, schema=SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RESTORE, _handle_service, schema=SERVICE_SCHEMA)
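# Example service call (illustrative entity id):
#   service: media_player.snapcast_snapshot
#   data:
#     entity_id: media_player.snapcast_client_livingroom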
try:
server = await snapcast.control.create_server(
hass.loop, host, port, reconnect=True)
except socket.gaierror:
_LOGGER.error("Could not connect to Snapcast server at %s:%d",
host, port)
return
# Note: The host part is needed when using multiple snapservers
hpid = '{}:{}'.format(host, port)
groups = [SnapcastGroupDevice(group, hpid) for group in server.groups]
clients = [SnapcastClientDevice(client, hpid) for client in server.clients]
devices = groups + clients
hass.data[DATA_KEY] = devices
async_add_entities(devices)
class SnapcastGroupDevice(MediaPlayerDevice):
"""Representation of a Snapcast group device."""
def __init__(self, group, uid_part):
"""Initialize the Snapcast group device."""
group.set_callback(self.schedule_update_ha_state)
self._group = group
self._uid = '{}{}_{}'.format(GROUP_PREFIX, uid_part,
self._group.identifier)
@property
def state(self):
"""Return the state of the player."""
return {
'idle': STATE_IDLE,
'playing': STATE_PLAYING,
'unknown': STATE_UNKNOWN,
}.get(self._group.stream_status, STATE_UNKNOWN)
@property
def unique_id(self):
"""Return the ID of snapcast group."""
return self._uid
@property
def name(self):
"""Return the name of the device."""
return '{}{}'.format(GROUP_PREFIX, self._group.identifier)
@property
def source(self):
"""Return the current input source."""
return self._group.stream
@property
def volume_level(self):
"""Return the volume level."""
return self._group.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._group.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_GROUP
@property
def source_list(self):
"""List of available input sources."""
return list(self._group.streams_by_name().keys())
@property
def device_state_attributes(self):
"""Return the state attributes."""
name = '{} {}'.format(self._group.friendly_name, GROUP_SUFFIX)
return {
'friendly_name': name
}
@property
def should_poll(self):
"""Do not poll for state."""
return False
async def async_select_source(self, source):
"""Set input source."""
streams = self._group.streams_by_name()
if source in streams:
await self._group.set_stream(streams[source].identifier)
self.async_schedule_update_ha_state()
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._group.set_muted(mute)
self.async_schedule_update_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._group.set_volume(round(volume * 100))
self.async_schedule_update_ha_state()
def snapshot(self):
"""Snapshot the group state."""
self._group.snapshot()
async def async_restore(self):
"""Restore the group state."""
await self._group.restore()
class SnapcastClientDevice(MediaPlayerDevice):
"""Representation of a Snapcast client device."""
def __init__(self, client, uid_part):
"""Initialize the Snapcast client device."""
client.set_callback(self.schedule_update_ha_state)
self._client = client
self._uid = '{}{}_{}'.format(CLIENT_PREFIX, uid_part,
self._client.identifier)
@property
def unique_id(self):
"""
Return the ID of this snapcast client.
Note: The host part is needed when using multiple snapservers
"""
return self._uid
@property
def name(self):
"""Return the name of the device."""
return '{}{}'.format(CLIENT_PREFIX, self._client.identifier)
@property
def volume_level(self):
"""Return the volume level."""
return self._client.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._client.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_CLIENT
@property
def state(self):
"""Return the state of the player."""
if self._client.connected:
return STATE_ON
return STATE_OFF
@property
def device_state_attributes(self):
"""Return the state attributes."""
name = '{} {}'.format(self._client.friendly_name, CLIENT_SUFFIX)
return {
'friendly_name': name
}
@property
def should_poll(self):
"""Do not poll for state."""
return False
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._client.set_muted(mute)
self.async_schedule_update_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._client.set_volume(round(volume * 100))
self.async_schedule_update_ha_state()
def snapshot(self):
"""Snapshot the client state."""
self._client.snapshot()
async def async_restore(self):
"""Restore the client state."""
await self._client.restore()
|
|
# Copyright 2008 Peter Bulychev
#
# This file is part of Clone Digger.
#
# Clone Digger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clone Digger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Clone Digger. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import difflib
import re
import copy
import traceback
import os.path
import pdb
from cgi import escape
import arguments
import anti_unification
import python_compiler
from abstract_syntax_tree import AbstractSyntaxTree
class Report:
def __init__(self):
self._error_info = []
self._clones = []
self._timers = []
self._file_names = []
def addFileName(self, file_name):
self._file_names.append(file_name)
def addErrorInformation(self, error_info):
self._error_info.append(error_info)
def addClone(self, clone):
self._clones.append(clone)
def sortByCloneSize(self):
def f(a,b):
return cmp(b.getMaxCoveredLineNumbersCount(), a.getMaxCoveredLineNumbersCount())
self._clones.sort(f)
def startTimer(self, descr):
self._timers.append([descr, time.time(), time.ctime()])
sys.stdout.flush()
def stopTimer(self, descr=''):
self._timers[-1][1] = time.time() - self._timers[-1][1]
def getTimerValues(self):
return self._timers
def getTotalTime(self):
return sum([i[1] for i in self.getTimerValues()])
class CPDXMLReport(Report):
def __init__(self):
Report.__init__(self)
self._mark_to_statement_hash = None
def setMarkToStatementHash(self, mark_to_statement_hash):
self._mark_to_statement_hash = mark_to_statement_hash
def writeReport(self, file_name):
f = open(file_name, 'w')
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write('<pmd-cpd>\n')
for clone in self._clones:
token_numbers = [sum([s.getTokenCount() for s in clone[i]]) for i in (0,1)]
f.write('<duplication lines="' + str(max([len(set(clone[i].getCoveredLineNumbers())) for i in [0,1]] )) + '" tokens="' + str(max(token_numbers)) +'">\n')
for i in [0,1]:
f.write('<file line="' + str(1 + min(clone[i].getCoveredLineNumbers())) + '" path="' + os.path.abspath(clone[i].getSourceFile().getFileName()) + '"/>\n')
f.write('<codefragment>\n')
f.write('<![CDATA[\n')
for line in clone[0].getSourceLines():
f.write(line.replace(']]>','-CLONEDIGGER REMOVED CDATAEND-'))
f.write('\n')
f.write(']]>\n')
f.write('</codefragment>\n')
f.write('</duplication>\n')
f.write('</pmd-cpd>\n')
f.close()
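# The XML written above follows the PMD-CPD report shape, roughly (editor's
# illustrative sketch; attribute values are made up):
#
# <?xml version="1.0" encoding="UTF-8"?>
# <pmd-cpd>
#   <duplication lines="12" tokens="95">
#     <file line="40" path="/abs/path/a.py"/>
#     <file line="108" path="/abs/path/b.py"/>
#     <codefragment><![CDATA[ ...duplicated source... ]]></codefragment>
#   </duplication>
# </pmd-cpd>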
class HTMLReport(Report):
def __init__(self):
Report.__init__(self)
self._mark_to_statement_hash = None
def setMarkToStatementHash(self, mark_to_statement_hash):
self._mark_to_statement_hash = mark_to_statement_hash
def writeReport(self, file_name):
# TODO REWRITE! This function code was created in a hurry
eclipse_start = '\n<!--ECLIPSE START-->'
eclipse_end = '\n<!--ECLIPSE END-->'
def format_line_code(s):
s = s.replace('\t', 4 * ' ')
s = s.replace(' ', '&nbsp;')
return '<span style="font-family: monospace;">%s</span>'%(s,)
errors_info = "\n".join(['<P> <FONT COLOR=RED> %s </FONT> </P>' % (error_info.replace('\n', '<BR>'),) for error_info in self._error_info])
very_strange_const = 'VERY_STRANGE_CONST'
clone_descriptions = []
for clone_i in range(len(self._clones)):
try:
clone = self._clones[clone_i]
s = '<P>'
s += '<B>Clone # %d</B><BR>'%(clone_i +1,)
# s = '<P> Clone detected in source files "%s" and "%s" <BR>\n' % (sequences[0].getSourceFile().getFileName(), sequences[1].getSourceFile().getFileName())
s+= 'Distance between two fragments = %d <BR>' %(clone.calcDistance())
s+= 'Clone size = ' + str(max([len(set(clone[i].getCoveredLineNumbers())) for i in [0,1]] ))
s+= '<TABLE NOWRAP WIDTH=100% BORDER=1>'
s+= eclipse_start
s+= '<TR>'
for j in [0,1]:
s+= '<TD> <a href="clone://%s?%d&%d"> Go to this fragment in Eclipse </a> </TD>'%(clone[j].getSourceFile().getFileName(), min(clone[j][0].getCoveredLineNumbers()), max(clone[j][-1].getCoveredLineNumbers()))
if j==0:
s += '<TD></TD>'
s+= '</TR>'
s+= eclipse_end
for j in [0,1]:
s+= '<TD>'
s+= 'Source file "%s"<BR>' %(clone[j].getSourceFile().getFileName(),)
if clone[j][0].getCoveredLineNumbers() == []:
# TODO remove after...
pdb.set_trace()
s+= 'The first line is %d' %(min(clone[j][0].getCoveredLineNumbers())+1,)
s+= '</TD>'
if j == 0:
s+= '<TD></TD>'
s+= '</TR>'
for i in range(clone[0].getLength()):
s += '<TR>\n'
t = []
statements = [clone[j][i] for j in [0,1]]
#print statements[0].__class__
def diff_highlight(seqs):
s = difflib.SequenceMatcher(lambda x:x == '<BR>\n')
s.set_seqs(seqs[0], seqs[1])
blocks = s.get_matching_blocks()
if not ((blocks[0][0]==0) and (blocks[0][1]==0)):
blocks = [(0,0,0)] + blocks
r = ['', '']
for i in range(len(blocks)):
block = blocks[i]
for j in [0,1]:
r[j] += escape(seqs[j][block[j]:block[j]+block[2]])
if (i < (len(blocks)-1)):
nextblock = blocks[i+1]
for j in [0,1]:
r[j] += '<span'+very_strange_const+'style="color:rgb(255,0,0);">%s</span>'%\
(escape(seqs[j][block[j]+block[2]:nextblock[j]]),)
return r
# preparation of indentation
indentations = (set(), set())
for j in (0,1):
for source_line in statements[j].getSourceLines():
indentations[j].add(re.findall('^\s*', source_line)[0].replace('\t', 4*' '))
indentations = (list(indentations[0]), list(indentations[1]))
indentations[0].sort()
indentations[1].sort()
source_lines = ([], [])
def use_diff():
for j in (0,1):
for source_line in statements[j].getSourceLines():
indent1 = re.findall('^\s*', source_line)[0]
indent2 = indent1.replace('\t', 4*' ')
source_line = re.sub('^' + indent1, indentations[j].index(indent2)*' ', source_line)
source_lines[j].append(source_line)
d = diff_highlight([('\n'.join(source_lines[j])) for j in [0,1]])
d = [format_line_code(d[i].replace('\n', '<BR>\n')) for i in [0,1]]
d = [d[i].replace(very_strange_const, ' ') for i in (0,1)]
u = anti_unification.Unifier(statements[0], statements[1])
return d,u
if arguments.use_diff:
(d,u) = use_diff()
#print u.getSize()
else:
try:
def rec_correct_as_string(t1, t2, s1, s2):
def highlight(s):
return '<span style="color: rgb(255, 0, 0);">' + s + '</span>'
class NewAsString:
def __init__(self, s):
self.s = highlight(s)
def __call__(self):
return self.s
def set_as_string_node_parent(t):
if not isinstance(t, AbstractSyntaxTree):
t = t.getParent()
n = NewAsString(t.ast_node.as_string())
t.ast_node.as_string = n
if (t1 in s1) or (t2 in s2):
for t in (t1, t2):
set_as_string_node_parent(t)
return
assert(len(t1.getChilds()) == len(t2.getChilds()))
for i in range(len(t1.getChilds())):
c1 = t1.getChilds()[i]
c2 = t2.getChilds()[i]
rec_correct_as_string(c1, c2, s1, s2)
(s1, s2) = (statements[0], statements[1])
u = anti_unification.Unifier(s1, s2)
rec_correct_as_string(s1, s2, u.getSubstitutions()[0].getMap().values(), u.getSubstitutions()[1].getMap().values() )
d = [None, None]
for j in (0,1):
d[j] = statements[j].ast_node.as_string()
lines = d[j].split('\n')
for ii in range(len(lines)):
temp_line = ''
jj = 0
try:
while lines[ii][jj] == ' ':
temp_line += ' '
jj += 1
except IndexError:
# suppress errors if line has no leading spaces
pass
temp_line += lines[ii][jj:]
lines[ii] = temp_line
d[j] = '\n'.join(lines)
d[j] = d[j].replace('\n', '<BR>\n')
except:
print 'The following error occurred during highlighting of differences on the AST level:'
traceback.print_exc()
print 'using diff highlight'
(d,u) = use_diff()
for j in [0,1]:
t.append('<TD>\n' + d[j] + '</TD>\n')
if u.getSize() > 0:
color = 'RED'
else:
color = 'AQUA'
s+= t[0] + '<TD style="width: 10px;" BGCOLOR=%s> </TD>'%(color,) + t[1]
s += '</TR>\n'
s+= '</TABLE> </P> <HR>'
clone_descriptions.append(s)
except:
print "Clone info can't be written to the report. "
traceback.print_exc()
descr = """<P>Source files: %d</P>
<a href = "javascript:unhide('files');">Click here to show/hide file names</a><div id="files" class="hidden"><P><B>Source files:</B><BR>%s</P></div>
<P>Clones detected: %d</P>
<P>%d of %d lines are duplicates (%.2f%%) </P>
<P>
<B>Parameters<BR> </B>
clustering_threshold = %d<BR>
distance_threshold = %d<BR>
size_threshold = %d<BR>
hashing_depth = %d<BR>
clusterize_using_hash = %s<BR>
clusterize_using_dcup = %s<BR>
</P>
""" % (len(self._file_names), ', <BR>'.join(self._file_names), len(self._clones), self.covered_source_lines_count, self.all_source_lines_count, (not self.all_source_lines_count and 100) or 100*self.covered_source_lines_count/float(self.all_source_lines_count), arguments.clustering_threshold, arguments.distance_threshold, arguments.size_threshold, arguments.hashing_depth, str(arguments.clusterize_using_hash), str(arguments.clusterize_using_dcup))
if arguments.print_time:
timings = ''
timings += '<B>Time elapsed</B><BR>'
timings += '<BR>\n'.join(['%s : %.2f seconds'%(i[0], i[1]) for i in self._timers])
timings += '<BR>\n Total time: %.2f' % (self.getTotalTime())
timings += '<BR>\n Started at: ' + self._timers[0][2]
timings += '<BR>\n Finished at: ' + self._timers[-1][2]
else:
timings = ''
marks_report = ''
if self._mark_to_statement_hash:
marks_report += '<P>Top 20 statement marks:'
marks = self._mark_to_statement_hash.keys()
marks.sort(lambda y,x:cmp(len(self._mark_to_statement_hash[x]), len(self._mark_to_statement_hash[y])))
counter = 0
for mark in marks[:20]:
counter += 1
marks_report += '<BR>' + str(len(self._mark_to_statement_hash[mark])) + ':' + str(mark.getUnifierTree()) + "<a href=\"javascript:unhide('stmt%d');\">show/hide representatives</a> "%(counter,)
marks_report += '<div id="stmt%d" class="hidden"> <BR>'%(counter,)
for statement in self._mark_to_statement_hash[mark]:
marks_report += str(statement) + '<BR>'
marks_report += '</div>'
marks_report += '</P>'
warnings = ''
if arguments.use_diff:
warnings += '<P>(*) Warning: the highlighting of differences is based on diff and doesn\'t reflect the tree-based clone detection algorithm.</P>'
save_to = eclipse_start + '<b><a href="file://%s">Save this report</a></b>'%(file_name,) +eclipse_end
HTML_code = """
<HTML>
<HEAD>
<TITLE> CloneDigger Report </TITLE>
<script type="text/javascript">
function unhide(divID) {
var item = document.getElementById(divID);
if (item) {
item.className=(item.className=='hidden')?'unhidden':'hidden';
}
}
</script>
<style type="text/css">
.hidden { display: none; }
.unhidden { display: block; }
.preformatted {
border: 1px dashed #3c78b5;
font-size: 11px;
font-family: Courier;
margin: 10px;
line-height: 13px;
}
.preformattedHeader {
background-color: #f0f0f0;
border-bottom: 1px dashed #3c78b5;
padding: 3px;
text-align: center;
}
.preformattedContent {
background-color: #f0f0f0;
padding: 3px;
}
<!--
<div class="preformatted"><div class="preformattedContent">
<pre>Clone Digger
</pre>
</div></div>
-->
</style>
</HEAD>
<BODY>
%s
%s
%s
%s
%s
%s
%s
<HR>
Clone Digger is aimed to find software clones in Python and Java programs. It is provided under the GPL license and can be downloaded from the site <a href="http://clonedigger.sourceforge.net">http://clonedigger.sourceforge.net</a>
</BODY>
</HTML>""" % (errors_info, save_to, descr, timings, '<BR>\n'.join(clone_descriptions), marks_report, warnings)
f = open(file_name, 'w')
f.write(re.sub(eclipse_start+'.*?'+eclipse_end, '' ,HTML_code))
f.close()
if arguments.eclipse_output:
f = open(arguments.eclipse_output, 'w')
f.write(HTML_code)
f.close()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService, quorum
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.transactional_message_copier import TransactionalMessageCopier
from kafkatest.utils import is_int
from ducktape.tests.test import Test
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
import time
class TransactionsTest(Test):
"""Tests transactions by transactionally copying data from a source topic to
a destination topic and killing the copy process as well as the broker
randomly through the process. In the end we verify that the final output
topic contains exactly one committed copy of each message in the input
topic.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(TransactionsTest, self).__init__(test_context=test_context)
self.input_topic = "input-topic"
self.output_topic = "output-topic"
self.num_brokers = 3
# Test parameters
self.num_input_partitions = 2
self.num_output_partitions = 3
self.num_seed_messages = 100000
self.transaction_size = 750
# The transaction timeout should be lower than the progress timeout, but at
# least as high as the request timeout (which is 30s by default). When the
# client is hard-bounced, progress may depend on the previous transaction
# being aborted. When the broker is hard-bounced, we may have to wait as
# long as the request timeout to get a `Produce` response and we do not
# want the coordinator timing out the transaction.
self.transaction_timeout = 40000
self.progress_timeout_sec = 60
self.consumer_group = "transactions-test-consumer-group"
self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None
self.kafka = KafkaService(test_context,
num_nodes=self.num_brokers,
zk=self.zk,
controller_num_nodes_override=1)
def setUp(self):
if self.zk:
self.zk.start()
def seed_messages(self, topic, num_seed_messages):
seed_timeout_sec = 10000
seed_producer = VerifiableProducer(context=self.test_context,
num_nodes=1,
kafka=self.kafka,
topic=topic,
message_validator=is_int,
max_messages=num_seed_messages,
enable_idempotence=True)
seed_producer.start()
wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
timeout_sec=seed_timeout_sec,
err_msg="Producer failed to produce %d messages in %ds." %\
(num_seed_messages, seed_timeout_sec))
return seed_producer.acked
def get_messages_from_topic(self, topic, num_messages):
consumer = self.start_consumer(topic, group_id="verifying_consumer")
return self.drain_consumer(consumer, num_messages)
def bounce_brokers(self, clean_shutdown):
for node in self.kafka.nodes:
if clean_shutdown:
self.kafka.restart_node(node, clean_shutdown = True)
else:
self.kafka.stop_node(node, clean_shutdown = False)
gracePeriodSecs = 5
if self.zk:
wait_until(lambda: len(self.kafka.pids(node)) == 0 and not self.kafka.is_registered(node),
timeout_sec=self.kafka.zk_session_timeout + gracePeriodSecs,
err_msg="Failed to see timely deregistration of hard-killed broker %s" % str(node.account))
else:
brokerSessionTimeoutSecs = 18
wait_until(lambda: len(self.kafka.pids(node)) == 0,
timeout_sec=brokerSessionTimeoutSecs + gracePeriodSecs,
err_msg="Failed to see timely disappearance of process for hard-killed broker %s" % str(node.account))
time.sleep(brokerSessionTimeoutSecs + gracePeriodSecs)
self.kafka.start_node(node)
self.kafka.await_no_under_replicated_partitions()
def create_and_start_message_copier(self, input_topic, input_partition, output_topic, transactional_id, use_group_metadata):
message_copier = TransactionalMessageCopier(
context=self.test_context,
num_nodes=1,
kafka=self.kafka,
transactional_id=transactional_id,
consumer_group=self.consumer_group,
input_topic=input_topic,
input_partition=input_partition,
output_topic=output_topic,
max_messages=-1,
transaction_size=self.transaction_size,
transaction_timeout=self.transaction_timeout,
use_group_metadata=use_group_metadata
)
message_copier.start()
wait_until(lambda: message_copier.alive(message_copier.nodes[0]),
timeout_sec=10,
err_msg="Message copier failed to start after 10 s")
return message_copier
def bounce_copiers(self, copiers, clean_shutdown):
for _ in range(3):
for copier in copiers:
wait_until(lambda: copier.progress_percent() >= 20.0,
timeout_sec=self.progress_timeout_sec,
err_msg="%s : Message copier didn't make enough progress in %ds. Current progress: %s" \
% (copier.transactional_id, self.progress_timeout_sec, str(copier.progress_percent())))
self.logger.info("%s - progress: %s" % (copier.transactional_id,
str(copier.progress_percent())))
copier.restart(clean_shutdown)
def create_and_start_copiers(self, input_topic, output_topic, num_copiers, use_group_metadata):
copiers = []
for i in range(0, num_copiers):
copiers.append(self.create_and_start_message_copier(
input_topic=input_topic,
output_topic=output_topic,
input_partition=i,
transactional_id="copier-" + str(i),
use_group_metadata=use_group_metadata
))
return copiers
def start_consumer(self, topic_to_read, group_id):
consumer = ConsoleConsumer(context=self.test_context,
num_nodes=1,
kafka=self.kafka,
topic=topic_to_read,
group_id=group_id,
message_validator=is_int,
from_beginning=True,
isolation_level="read_committed")
consumer.start()
# ensure that the consumer is up.
wait_until(lambda: len(consumer.messages_consumed[1]) > 0,
timeout_sec=60,
err_msg="Consumer failed to consume any messages for %ds" % 60)
return consumer
def drain_consumer(self, consumer, num_messages):
# wait until we read at least the expected number of messages.
# This is a safe check because both failure modes will be caught:
# 1. If we have 'num_seed_messages' but there are duplicates, then
# this is checked for later.
#
# 2. If we never reach 'num_seed_messages', then this will cause the
# test to fail.
wait_until(lambda: len(consumer.messages_consumed[1]) >= num_messages,
timeout_sec=90,
err_msg="Consumer consumed only %d out of %d messages in %ds" %\
(len(consumer.messages_consumed[1]), num_messages, 90))
consumer.stop()
return consumer.messages_consumed[1]
def copy_messages_transactionally(self, failure_mode, bounce_target,
input_topic, output_topic,
num_copiers, num_messages_to_copy,
use_group_metadata):
"""Copies messages transactionally from the seeded input topic to the
output topic, either bouncing brokers or clients in a hard and soft
way as it goes.
This method also consumes messages in read_committed mode from the
output topic while the bounces and copy is going on.
It returns the concurrently consumed messages.
"""
copiers = self.create_and_start_copiers(input_topic=input_topic,
output_topic=output_topic,
num_copiers=num_copiers,
use_group_metadata=use_group_metadata)
concurrent_consumer = self.start_consumer(output_topic,
group_id="concurrent_consumer")
clean_shutdown = False
if failure_mode == "clean_bounce":
clean_shutdown = True
if bounce_target == "brokers":
self.bounce_brokers(clean_shutdown)
elif bounce_target == "clients":
self.bounce_copiers(copiers, clean_shutdown)
copier_timeout_sec = 120
for copier in copiers:
wait_until(lambda: copier.is_done,
timeout_sec=copier_timeout_sec,
err_msg="%s - Failed to copy all messages in %ds." %\
(copier.transactional_id, copier_timeout_sec))
self.logger.info("finished copying messages")
return self.drain_consumer(concurrent_consumer, num_messages_to_copy)
def setup_topics(self):
self.kafka.topics = {
self.input_topic: {
"partitions": self.num_input_partitions,
"replication-factor": 3,
"configs": {
"min.insync.replicas": 2
}
},
self.output_topic: {
"partitions": self.num_output_partitions,
"replication-factor": 3,
"configs": {
"min.insync.replicas": 2
}
}
}
@cluster(num_nodes=9)
@matrix(failure_mode=["hard_bounce", "clean_bounce"],
bounce_target=["brokers", "clients"],
check_order=[True, False],
use_group_metadata=[True, False])
def test_transactions(self, failure_mode, bounce_target, check_order, use_group_metadata, metadata_quorum=quorum.all):
security_protocol = 'PLAINTEXT'
self.kafka.security_protocol = security_protocol
self.kafka.interbroker_security_protocol = security_protocol
self.kafka.logs["kafka_data_1"]["collect_default"] = True
self.kafka.logs["kafka_data_2"]["collect_default"] = True
self.kafka.logs["kafka_operational_logs_debug"]["collect_default"] = True
if check_order:
# To check ordering, we simply create input and output topics
# with a single partition.
# We reduce the number of seed messages to copy to account for the fewer output
# partitions, and thus lower parallelism. This helps keep the test
# time shorter.
self.num_seed_messages = self.num_seed_messages // 3
self.num_input_partitions = 1
self.num_output_partitions = 1
self.setup_topics()
self.kafka.start()
input_messages = self.seed_messages(self.input_topic, self.num_seed_messages)
concurrently_consumed_messages = self.copy_messages_transactionally(
failure_mode, bounce_target, input_topic=self.input_topic,
output_topic=self.output_topic, num_copiers=self.num_input_partitions,
num_messages_to_copy=self.num_seed_messages, use_group_metadata=use_group_metadata)
output_messages = self.get_messages_from_topic(self.output_topic, self.num_seed_messages)
concurrently_consumed_message_set = set(concurrently_consumed_messages)
output_message_set = set(output_messages)
input_message_set = set(input_messages)
num_dups = abs(len(output_messages) - len(output_message_set))
num_dups_in_concurrent_consumer = abs(len(concurrently_consumed_messages)
- len(concurrently_consumed_message_set))
assert num_dups == 0, "Detected %d duplicates in the output stream" % num_dups
assert input_message_set == output_message_set, "Input and output message sets are not equal. Num input messages %d. Num output messages %d" %\
(len(input_message_set), len(output_message_set))
assert num_dups_in_concurrent_consumer == 0, "Detected %d dups in concurrently consumed messages" % num_dups_in_concurrent_consumer
assert input_message_set == concurrently_consumed_message_set, \
"Input and concurrently consumed output message sets are not equal. Num input messages: %d. Num concurrently_consumed_messages: %d" %\
(len(input_message_set), len(concurrently_consumed_message_set))
if check_order:
assert input_messages == sorted(input_messages), "The seed messages themselves were not in order"
assert output_messages == input_messages, "Output messages are not in order"
assert concurrently_consumed_messages == output_messages, "Concurrently consumed messages are not in order"
|
|
import subprocess
import contextlib
import resource
import tempfile
import logging
import signal
import socket
import shutil
import time
import glob
import os
import re
l = logging.getLogger("tracer.qemu_runner")
import angr
from .tracerpov import TracerPoV
from .tinycore import TinyCore
try:
import shellphish_qemu
except ImportError as e:
raise ImportError("Unable to import shellphish_qemu, which is required by QEMURunner. Please install it before proceeding.") from e
class RunnerEnvironmentError(Exception):
pass
class QEMURunner:
"""
Trace an angr path with a concrete input using QEMU.
"""
def __init__(
self, binary=None, input=None, project=None, record_trace=True, record_stdout=False,
record_magic=True, record_core=False, seed=None, memory_limit="8G", bitflip=False, report_bad_args=False,
use_tiny_core=False, max_size=None, qemu=None, argv=None, library_path=None, ld_linux=None,
trace_log_limit=2**30, trace_timeout=10, exec_func=None
): #pylint:disable=redefined-builtin
"""
:param binary : Path to the binary to be traced.
:param input : Concrete input to feed to binary (bytes or CGC TracerPoV).
:param project : The original project.
:param record_trace : Whether or not to record the basic block trace.
:param record_stdout : Whether or not to record the output of the tracing process.
:param record_magic : Whether or not to record the magic flag page as reported by QEMU.
:param record_core : Whether or not to record the core file in case of crash.
:param report_bad_args: Enable CGC QEMU's report bad args option.
:param use_tiny_core : Use minimal core loading.
:param trace_source_path: Path to the trace source to be used.
:param max_size : Optionally set max size of input. Defaults to size
of preconstrained input.
:param qemu : Path to a QEMU binary to force the tracer to use.
:param argv : Optionally specify argv params (i,e,: ['./calc', 'parm1']).
Defaults to binary name with no params.
:param trace_log_limit: Optionally specify the dynamic trace log file
size limit in bytes, defaults to 1G.
:param trace_timeout : Optionally specify the dynamic trace time limit in seconds;
defaults to 10 seconds.
:param exec_func : Optional function to run instead of self._exec_func.
"""
if type(input) not in (bytes, TracerPoV):
raise RunnerEnvironmentError("Input for tracing should be either a bytestring or a TracerPoV for CGC PoV file.")
if binary is not None:
self._filename = binary
self._p = angr.Project(self._filename)
elif project is not None:
self._p = project
self._filename = project.filename
else:
raise ValueError("Must specify project or binary.")
# Hack for architecture and OS.
self.os = self._p.loader.main_object.os
self.base_addr = self._p.loader.main_object.min_addr
self.rebase = False
self.input = input
self._record_trace = record_trace
self._record_core = record_core
self.argv = argv
# Basic block trace.
self.trace = [ ]
# In case of crash and record_core is set.
self.reg_vals = None
self._state = None
self.memory = None
self._use_tiny_core = use_tiny_core
self.trace_source = None
self._trace_source_path = qemu
# Does the input cause a crash?
self.crash_mode = False
# If the input causes a crash, what address does it crash at?
self.crash_addr = None
self.stdout = None
# compatibility for now
self.is_multicb = False
self.tmout = False
self.returncode = None
self._record_magic = record_magic and self.os == 'cgc'
if type(library_path) is str:
library_path = [library_path]
self._library_path = library_path
self._ld_linux = ld_linux
if isinstance(seed, int):
seed = str(seed)
self._seed = seed
self._memory_limit = memory_limit
self._bitflip = bitflip
self._report_bad_args = report_bad_args
if self.input is None:
raise ValueError("Must specify input.")
# validate seed
if self._seed is not None:
try:
iseed = int(self._seed)
if iseed > 4294967295 or iseed < 0:
raise ValueError
except ValueError:
raise ValueError("The passed seed is either not an integer or is not between 0 and UINT_MAX")
self.input_max_size = max_size or (len(input) if type(input) is bytes else None)
self.trace_log_limit = trace_log_limit
self.trace_timeout = trace_timeout
self.sanity_check()
l.debug("Accumulating basic block trace...")
l.debug("tracer qemu path: %s", self._trace_source_path)
self.stdout = None
# We need this to keep symbolic traces following the same path
# as their dynamic counterpart
self.magic = None
if exec_func:
self._exec_func = exec_func
if record_stdout:
fd, tmp = tempfile.mkstemp(prefix="stdout_" + os.path.basename(self._p.filename))
# will set crash_mode correctly
self._run(stdout_file=tmp)
with open(tmp, "rb") as f:
self.stdout = f.read()
os.close(fd)
os.remove(tmp)
else:
# will set crash_mode correctly
self._run()
### SETUP
def sanity_check(self):
self._check_binary()
self._check_qemu_install()
def _check_binary(self):
# check the binary
if not os.access(self._filename, os.X_OK):
if os.path.isfile(self._filename):
error_msg = "\"%s\" binary is not executable" % self._filename
l.error(error_msg)
raise RunnerEnvironmentError(error_msg)
else:
error_msg = "\"%s\" binary does not exist" % self._filename
l.error(error_msg)
raise RunnerEnvironmentError(error_msg)
# hack for the OS
if self.os != 'cgc' and not self.os.startswith("UNIX"):
error_msg = "\"%s\" runs on an OS not supported by the qemu runner (only cgc and elf at the moment)" % self._filename
l.error(error_msg)
raise RunnerEnvironmentError(error_msg)
# try to find the install base
self._check_qemu_install()
def _check_qemu_install(self):
"""
Check the install location of QEMU.
"""
if self.os == "cgc":
suffix = "tracer" if self._record_trace else "base"
self.trace_source = "shellphish-qemu-cgc-%s" % suffix
else:
self.trace_source = "shellphish-qemu-linux-%s" % self._p.arch.qemu_name
if self._trace_source_path is None or not os.access(self._trace_source_path, os.X_OK):
if self._trace_source_path is not None:
l.warning("Problem accessing forced %s. Using our default %s.", self._trace_source_path, self.trace_source)
self._trace_source_path = shellphish_qemu.qemu_path(self.trace_source)
if not os.access(self._trace_source_path, os.X_OK):
if os.path.isfile(self._trace_source_path):
error_msg = "%s is not executable" % self.trace_source
l.error(error_msg)
raise RunnerEnvironmentError(error_msg)
else:
error_msg = "\"%s\" does not exist" % self._trace_source_path
l.error(error_msg)
raise RunnerEnvironmentError(error_msg)
### DYNAMIC TRACING
def __get_rlimit_func(self):
def set_rlimits():
# here we limit the logsize
resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
resource.setrlimit(resource.RLIMIT_FSIZE, (self.trace_log_limit, self.trace_log_limit))
return set_rlimits
@staticmethod
@contextlib.contextmanager
def _mk_tmpdir():
tmpdir = tempfile.mkdtemp(prefix="/tmp/tracer_")
try:
yield tmpdir
finally:
with contextlib.suppress(FileNotFoundError):
shutil.rmtree(tmpdir)
@staticmethod
@contextlib.contextmanager
def _tmpfile(**kwargs):
fd, tmpfile = tempfile.mkstemp(**kwargs)
os.close(fd)
try:
yield tmpfile
finally:
with contextlib.suppress(FileNotFoundError):
os.unlink(tmpfile)
@contextlib.contextmanager
def _exec_func(self, qemu_variant, qemu_args, program_args, ld_path=None, stdin=None, stdout=None, stderr=None, record_trace=True, record_magic=False, core_target=None): #pylint:disable=method-hidden
#pylint:disable=subprocess-popen-preexec-fn
with self._mk_tmpdir() as tmpdir, self._tmpfile(dir="/dev/shm/", prefix="tracer-log-") as trace_filename, self._tmpfile(dir="/dev/shm/", prefix="tracer-magic-") as magic_filename, contextlib.ExitStack() as exit_stack:
cmd_args = [ qemu_variant ]
cmd_args += qemu_args
cmd_args += ["-C", tmpdir]
# hardcode an argv[0]
#cmd_args += [ "-0", program_args[0] ]
# record the trace, if we want to
if record_trace:
if 'cgc' in qemu_variant:
cmd_args += ["-d", "exec", "-D", trace_filename]
else:
cmd_args += ["-d", "exec,nochain,page", "-D", trace_filename]
else:
trace_filename = None
cmd_args += ["-enable_double_empty_exiting"]
# If the binary is CGC we'll also take this opportunity to read in the magic page.
if record_magic:
cmd_args += ["-magicdump", magic_filename]
else:
magic_filename = None
if ld_path:
cmd_args.append(ld_path)
# and the program
cmd_args += program_args
# set up files
stdin_file = subprocess.DEVNULL if stdin is None else exit_stack.enter_context(open(stdin, 'wb')) if type(stdin) is str else stdin
stdout_file = subprocess.DEVNULL if stdout is None else exit_stack.enter_context(open(stdout, 'wb')) if type(stdout) is str else stdout
stderr_file = subprocess.DEVNULL if stderr is None else exit_stack.enter_context(open(stderr, 'wb')) if type(stderr) is str else stderr
r = { }
r['process'] = subprocess.Popen(
cmd_args,
stdin=stdin_file, stdout=stdout_file, stderr=stderr_file,
preexec_fn=self.__get_rlimit_func()
)
try:
yield r
r['returncode'] = r['process'].wait(timeout=self.trace_timeout)
r['timeout'] = False
# save the trace
r['trace'] = ''
if record_trace:
with open(trace_filename, 'rb') as tf:
r['trace'] = tf.read()
# save the magic
r['magic'] = ''
if record_magic:
with open(magic_filename, 'rb') as tf:
r['magic'] = tf.read()
# save the core and clean up the original core
core_glob = glob.glob(os.path.join(tmpdir, "qemu_"+os.path.basename(program_args[0])+"_*.core"))
if core_target and core_glob:
shutil.copy(core_glob[0], core_target)
if core_glob:
os.unlink(core_glob[0])
except subprocess.TimeoutExpired:
r['process'].terminate()
r['returncode'] = r['process'].wait()
if record_trace and 'trace' not in r:
r['trace'] = b''
if record_magic and 'magic' not in r:
r['magic'] = b''
r['timeout'] = True
return r
def _run(self, stdout_file=None):
qemu_variant = self._trace_source_path
qemu_args = [ ]
if self._bitflip:
qemu_args.append("-bitflip")
if self._seed is not None:
qemu_args.append("-seed")
qemu_args.append(str(self._seed))
if self._report_bad_args:
qemu_args += ["-report_bad_args"]
if 'cgc' not in self._trace_source_path:
qemu_args += ['-E', 'LD_BIND_NOW=1']
if self._library_path:
qemu_args += ['-E', 'LD_LIBRARY_PATH=' + ':'.join(self._library_path)]
# Memory limit option is only available in shellphish-qemu-cgc-*
if 'cgc' in self._trace_source_path:
qemu_args += ["-m", self._memory_limit]
program_args = self.argv or [self._filename]
do_pov = type(self.input) is not bytes
if do_pov:
l.debug("Tracing as pov file")
in_s, out_s = socket.socketpair()
else:
in_s = subprocess.PIPE
out_s = None
with self._tmpfile(prefix='tracer-core-') as core_target:
with self._exec_func(
qemu_variant, qemu_args, program_args, ld_path=self._ld_linux,
stdin=in_s, stdout=stdout_file,
record_trace=self._record_trace, record_magic=self._record_magic,
core_target=core_target if self._record_core else None
) as exec_details:
if do_pov:
for write in self.input.writes:
out_s.send(write)
time.sleep(.01)
else:
exec_details['process'].communicate(self.input, timeout=self.trace_timeout)
self.returncode = exec_details['returncode']
self.tmout = exec_details['timeout']
# did a crash occur?
if self.returncode < 0:
if abs(self.returncode) == signal.SIGSEGV or abs(self.returncode) == signal.SIGILL:
l.info("Input caused a crash (signal %d) during dynamic tracing", abs(self.returncode))
l.debug(repr(self.input))
l.debug("Crash mode is set")
self.crash_mode = True
if self._record_core:
# find core file
a_mesg = "Empty core file generated"
assert os.path.getsize(core_target) > 0, a_mesg
if self._use_tiny_core:
self._load_tiny_core(core_target)
else:
self._load_core_values(core_target)
if self._record_trace:
try:
trace = exec_details['trace']
addrs = []
# Find where qemu loaded the binary. Primarily for PIE
qemu_base_addr = int(trace.split(b"start_code")[1].split(b"\n")[0], 16)
if self.base_addr != qemu_base_addr and self._p.loader.main_object.pic:
self.base_addr = qemu_base_addr
self.rebase = True
prog = re.compile(br'Trace (.*) \[(?P<addr>.*)\].*' if 'cgc' in qemu_variant else br'Trace (.*) \[(?P<something1>.*)\/(?P<addr>.*)\/(?P<flags>.*)\].*')
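# Illustrative shapes of the QEMU "-d exec" log lines the patterns above
# target (editor's note; exact formatting depends on the QEMU build):
#   cgc variant:   Trace 0x0804d100 [08048632]
#   linux variant: Trace 0 [0/00000000004004d6/0x0]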
for t in trace.split(b'\n'):
m = prog.match(t)
if m is not None:
addr_str = m.group('addr')
addrs.append(int(addr_str, base=16))
else:
continue
# grab the faulting address
if self.crash_mode:
lastline = trace.split(b'\n')[-2]
if lastline.startswith(b"Trace") or lastline.find(b"Segmentation") == -1:
l.warning("Trace return code was less than zero, but the last line of the trace does not"
"contain the uncaught exception error from qemu."
"If using an older version of shellphish_qemu try using 'ulimit -Sc 0' or "
"updating to a newer version of shellphish_qemu.")
self.crash_addr = int(lastline.split(b'[')[1].split(b']')[0], 16)
self.trace = addrs
l.debug("Trace consists of %d basic blocks", len(self.trace))
except IndexError:
l.warning("The trace is found to be malformed. "
"it is possible that the log file size exceeds the 1G limit, "
"meaning that there might be infinite loops in the target program.")
if self._record_magic:
self.magic = exec_details['magic']
a_mesg = "Magic content read from QEMU improper size, should be a page in length"
assert len(self.magic) == 0x1000, a_mesg
def _load_core_values(self, core_file):
p = angr.Project(core_file)
self.reg_vals = p.loader.main_object.thread_registers()
self._state = p.factory.entry_state()
self.memory = self._state.memory
def _load_tiny_core(self, core_file):
tc = TinyCore(core_file)
self.reg_vals = tc.registers
self.memory = None
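# --- Illustrative usage (editor's sketch; the path and input are hypothetical) ---
# runner = QEMURunner(binary="/tmp/target_binary", input=b"AAAA\n")
# if runner.crash_mode:
#     print("crashed at %#x" % runner.crash_addr)
# print("trace length:", len(runner.trace))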
|
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for V-Trace algorithm.
V-Trace is a form of importance sampling correction that was introduced by
Espeholt et al. in the context of an off-policy actor-critic agent (IMPALA).
This subpackage implements the specific targets used in IMPALA to implement
both the value and the policy. Note however that the V-Trace return estimate is
a special case of the multistep return estimates from `multistep.py`.
"""
import collections
import chex
import jax
import jax.numpy as jnp
Array = chex.Array
VTraceOutput = collections.namedtuple(
'vtrace_output', ['errors', 'pg_advantage', 'q_estimate'])
def vtrace(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
lambda_: float = 1.0,
clip_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates V-Trace errors from importance weights.
V-trace computes TD-errors from multistep trajectories by applying
off-policy corrections based on clipped importance sampling ratios.
See "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561).
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance sampling ratios at time t-1.
lambda_: scalar mixing parameter lambda.
clip_rho_threshold: clip threshold for importance weights.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
V-Trace error.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1], [1, 1, 1, 1, 1])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1],
[float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# Clip importance sampling ratios.
c_tm1 = jnp.minimum(1.0, rho_tm1) * lambda_
clipped_rhos_tm1 = jnp.minimum(clip_rho_threshold, rho_tm1)
# Compute the temporal difference errors.
td_errors = clipped_rhos_tm1 * (r_t + discount_t * v_t - v_tm1)
# Work backwards computing the td-errors.
err = 0.0
errors = []
for i in reversed(range(v_t.shape[0])):
err = td_errors[i] + discount_t[i] * c_tm1[i] * err
errors.insert(0, err)
# Return errors, maybe disabling gradient flow through bootstrap targets.
return jax.lax.select(
stop_target_gradients,
jax.lax.stop_gradient(jnp.array(errors) + v_tm1) - v_tm1,
jnp.array(errors))
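# --- Illustrative usage (editor's sketch; the values below are made up) ---
# A tiny on-policy example: with all importance ratios equal to 1 the clipping
# is inactive and the V-Trace errors reduce to ordinary lambda-return errors.
def _example_vtrace():
    v_tm1 = jnp.array([1.0, 2.0, 3.0])
    v_t = jnp.array([2.0, 3.0, 0.0])
    r_t = jnp.array([0.0, 1.0, 5.0])
    discount_t = jnp.array([0.9, 0.9, 0.0])
    rho_tm1 = jnp.ones_like(r_t)
    return vtrace(v_tm1, v_t, r_t, discount_t, rho_tm1)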
def leaky_vtrace(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
alpha_: float = 1.0,
lambda_: float = 1.0,
clip_rho_threshold: float = 1.0,
stop_target_gradients: bool = True):
"""Calculates Leaky V-Trace errors from importance weights.
Leaky-Vtrace is a combination of Importance sampling and V-trace, where the
degree of mixing is controlled by a scalar `alpha` (that may be meta-learnt).
See "Self-Tuning Deep Reinforcement Learning"
by Zahavy et al. (https://arxiv.org/abs/2002.12928)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
alpha_: mixing parameter for Importance Sampling and V-trace.
lambda_: scalar mixing parameter lambda.
clip_rho_threshold: clip threshold for importance weights.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
Leaky V-Trace error.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1], [1, 1, 1, 1, 1])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1],
[float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# Mix clipped and unclipped importance sampling ratios.
c_tm1 = (
(1 - alpha_) * rho_tm1 + alpha_ * jnp.minimum(1.0, rho_tm1)) * lambda_
clipped_rhos_tm1 = (
(1 - alpha_) * rho_tm1 + alpha_ * jnp.minimum(clip_rho_threshold, rho_tm1)
)
# Compute the temporal difference errors.
td_errors = clipped_rhos_tm1 * (r_t + discount_t * v_t - v_tm1)
# Work backwards computing the td-errors.
err = 0.0
errors = []
for i in reversed(range(v_t.shape[0])):
err = td_errors[i] + discount_t[i] * c_tm1[i] * err
errors.insert(0, err)
# Return errors, maybe disabling gradient flow through bootstrap targets.
return jax.lax.select(
stop_target_gradients,
jax.lax.stop_gradient(jnp.array(errors) + v_tm1) - v_tm1,
jnp.array(errors))
def vtrace_td_error_and_advantage(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
lambda_: float = 1.0,
clip_rho_threshold: float = 1.0,
clip_pg_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> VTraceOutput:
"""Calculates V-Trace errors and PG advantage from importance weights.
This function computes the TD-errors and policy gradient advantage terms
as used by the IMPALA distributed actor-critic agent.
See "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
lambda_: scalar mixing parameter lambda.
clip_rho_threshold: clip threshold for importance ratios.
clip_pg_rho_threshold: clip threshold for policy gradient importance ratios.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
a tuple of V-Trace error, policy gradient advantage, and estimated Q-values.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1], 1)
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1], float)
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
errors = vtrace(
v_tm1, v_t, r_t, discount_t, rho_tm1,
lambda_, clip_rho_threshold, stop_target_gradients)
targets_tm1 = errors + v_tm1
q_bootstrap = jnp.concatenate([
lambda_ * targets_tm1[1:] + (1 - lambda_) * v_tm1[1:],
v_t[-1:],
], axis=0)
q_estimate = r_t + discount_t * q_bootstrap
clipped_pg_rho_tm1 = jnp.minimum(clip_pg_rho_threshold, rho_tm1)
pg_advantages = clipped_pg_rho_tm1 * (q_estimate - v_tm1)
return VTraceOutput(
errors=errors, pg_advantage=pg_advantages, q_estimate=q_estimate)
def leaky_vtrace_td_error_and_advantage(
v_tm1: chex.Array,
v_t: chex.Array,
r_t: chex.Array,
discount_t: chex.Array,
rho_tm1: chex.Array,
alpha: float = 1.0,
lambda_: float = 1.0,
clip_rho_threshold: float = 1.0,
clip_pg_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> VTraceOutput:
"""Calculates Leaky V-Trace errors and PG advantage from importance weights.
This function computes the Leaky V-Trace TD-errors and policy gradient
Advantage terms as used by the IMPALA distributed actor-critic agent.
Leaky-Vtrace is a combination of Importance sampling and V-trace, where the
degree of mixing is controlled by a scalar `alpha` (that may be meta-learnt).
See "Self-Tuning Deep Reinforcement Learning"
by Zahavy et al. (https://arxiv.org/abs/2002.12928) and
"IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
alpha: mixing the clipped importance sampling weights with unclipped ones.
lambda_: scalar mixing parameter lambda.
clip_rho_threshold: clip threshold for importance ratios.
clip_pg_rho_threshold: clip threshold for policy gradient importance ratios.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
a tuple of V-Trace error, policy gradient advantage, and estimated Q-values.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1], 1)
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1], float)
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
errors = leaky_vtrace(
v_tm1, v_t, r_t, discount_t, rho_tm1, alpha,
lambda_, clip_rho_threshold, stop_target_gradients)
targets_tm1 = errors + v_tm1
q_bootstrap = jnp.concatenate([
lambda_ * targets_tm1[1:] + (1 - lambda_) * v_tm1[1:],
v_t[-1:],
], axis=0)
q_estimate = r_t + discount_t * q_bootstrap
clipped_pg_rho_tm1 = ((1 - alpha) * rho_tm1 + alpha *
jnp.minimum(clip_pg_rho_threshold, rho_tm1))
pg_advantages = clipped_pg_rho_tm1 * (q_estimate - v_tm1)
return VTraceOutput(
errors=errors, pg_advantage=pg_advantages, q_estimate=q_estimate)
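# Editor's note: with alpha=1.0 the leaky variants above reduce exactly to the
# standard V-Trace functions, while alpha=0.0 corresponds to pure, unclipped
# importance sampling.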
|
|
from warnings import warn
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from modeltree.tree import trees
SORT_DIRECTIONS = ('asc', 'desc')
class Node(object):
def __init__(self, facets=None, **context):
self.facets = facets or []
self.tree = context.pop('tree', None)
self.context = context
@property
def concept_ids(self):
ids = []
for facet in self.facets:
if facet.get('enabled') is False:
continue
if facet.get('visible') is False:
continue
ids.append(facet['concept'])
return ids
@property
def ordering(self):
ids = []
length = len(self.facets)
for facet in self.facets:
if facet.get('enabled') is False:
continue
if facet.get('sort') in SORT_DIRECTIONS:
# If sort_index is not defined, default to the number of facets
ids.append((
facet.get('sort_index', length),
facet['concept'],
facet['sort'],
))
# Sort relative to sort index
ids.sort(key=lambda x: x[0])
# Return only the concept id and sort direction
return [(c, s) for i, c, s in ids]
def _get_concepts(self, ids):
"Returns an ordered list of concepts based on `ids`."
if not ids:
return []
from avocado.models import DataConcept
concepts = list(DataConcept.objects.filter(pk__in=ids))
concepts.sort(key=lambda o: ids.index(o.pk))
return concepts
def _get_fields_for_concepts(self, ids):
"Returns an ordered list of fields for concept `ids`."
from avocado.models import DataConceptField
if not ids:
return OrderedDict()
# Concept fields that are sorted by concept then order, but are not
# in the original order defined in `ids`
cfields = list(DataConceptField.objects.filter(concept__pk__in=ids)
.select_related()
.order_by('concept', 'order'))
# Order concept fields relative to `ids`
cfields.sort(key=lambda o: ids.index(o.concept.pk))
# Construct an ordered dict of fields by their concept
groups = OrderedDict()
for cf in cfields:
pk = cf.concept.pk
if pk not in groups:
groups[pk] = []
groups[pk].append(cf.field)
return groups
def _get_select(self, distinct):
# Apply all fields to the query to ensure ordering get applied.
# Django removes ORDER BY statements if column is not present in
# SELECT since it will cause a SQL error. This ensures the ordering
# is applied at the SQL level. The caveat here is that the rows
# returned will include this extra data. The exporter classes handle
# this by removing redundant rows relative to the *original* columns.
ids = list(self.concept_ids)
ordering = self.ordering
if ordering and distinct:
ids += list(zip(*ordering)[0])
# Flatten the grouped fields
fields = [i for l in self._get_fields_for_concepts(ids).values()
for i in l]
model_fields = []
for f in fields:
model_fields.append((f.model, f.label_field))
return model_fields
def _get_order_by(self):
"Returns directional lookups to be unpacked in `QuerySet.order_by`."
order_by = []
ordering = self.ordering
if ordering:
tree = trees[self.tree]
ids, directions = zip(*ordering)
groups = self._get_fields_for_concepts(ids)
for pk, direction in ordering:
for f in groups[pk]:
lookup = tree.query_string_for_field(f.order_field,
model=f.model)
if direction.lower() == 'desc':
order_by.append('-' + lookup)
else:
order_by.append(lookup)
return order_by
# Primary method for applying this view to a QuerySet
def apply(self, queryset=None, include_pk=True):
tree = trees[self.tree]
if queryset is None:
queryset = tree.get_queryset()
# Add the fields to the queryset
fields = self._get_select(queryset.query.distinct)
queryset = tree.add_select(queryset=queryset,
include_pk=include_pk,
*fields)
# Set the order by on the QuerySet
order_by = self._get_order_by()
if order_by:
queryset = queryset.order_by(*order_by)
return queryset
# Additional public methods for general use and interrogation
def get_concepts_for_select(self):
return self._get_concepts(self.concept_ids)
def get_fields_for_select(self):
return self._get_fields_for_concepts(self.concept_ids)
def get_concepts_for_order_by(self):
ids = []
ordering = self.ordering
if ordering:
ids = zip(*ordering)[0]
return self._get_concepts(ids)
def get_fields_for_order_by(self):
ids = []
ordering = self.ordering
if ordering:
            ids = [pk for pk, _ in ordering]
return self._get_fields_for_concepts(ids)
def convert_legacy(attrs):
facets = []
columns = attrs.get('columns', [])
ordering = attrs.get('ordering', [])
# Map to position
_ordering = {}
for i, (concept, direction) in enumerate(ordering):
_ordering[concept] = i
for pk in columns:
facet = {
'concept': pk,
'sort': None,
'sort_index': None,
'visible': True,
}
if pk in _ordering:
index = _ordering.pop(pk)
facet['sort'] = ordering[index][1]
facet['sort_index'] = index
facets.append(facet)
# Append the sort only concepts
for pk in _ordering:
index = _ordering[pk]
facets.append({
'visible': False,
'concept': pk,
'sort': ordering[index][1],
'sort_index': index,
})
return facets
def validate(facets, **context):
if not facets:
return None
from avocado.models import DataConcept
# Legacy format
if isinstance(facets, dict):
warn('The dict-based view structure has been deprecated. '
'A list of facets objects must now be provided.',
DeprecationWarning)
facets = convert_legacy(facets)
for attrs in facets:
enabled = attrs.pop('enabled', None)
attrs.pop('errors', None)
attrs.pop('warnings', None)
errors = []
warnings = []
concept = None
if 'concept' not in attrs:
enabled = False
errors.append('Concept is required')
else:
try:
concept = DataConcept.objects.get(pk=attrs.get('concept'))
except DataConcept.DoesNotExist:
enabled = False
errors.append('Concept does not exist')
if attrs.get('sort') and attrs['sort'] not in SORT_DIRECTIONS:
warnings.append('Invalid sort direction. Must be "asc" or "desc"')
if concept and not concept.sortable:
warnings.append('Cannot sort by concept')
if enabled is False:
attrs['enabled'] = False
# Amend errors and warnings if present
if errors:
attrs['errors'] = errors
if warnings:
attrs['warnings'] = warnings
return facets
def parse(facets, **context):
if not facets:
return Node(**context)
# Legacy format
if isinstance(facets, dict):
warn('The dict-based view structure has been deprecated. '
'A list of facets objects must now be provided.',
DeprecationWarning)
facets = convert_legacy(facets)
return Node(facets, **context)
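# A minimal usage sketch, as comments (the concept pk and tree name are
# hypothetical, and it assumes Node accepts a `tree` in its context, as
# `apply()` above suggests):
#
#   facets = [{'concept': 1, 'sort': 'desc', 'sort_index': 0, 'visible': True}]
#   node = parse(validate(facets), tree='default')
#   queryset = node.apply()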
|
|
import hashlib
import hmac
import logging
import re
import struct
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
from Crypto.Cipher import AES
from requests import Response
from requests.adapters import BaseAdapter
from streamlink.exceptions import NoStreamsError
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWriter
from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
class AbemaTVHLSStreamWriter(HLSStreamWriter):
def should_filter_sequence(self, sequence):
return "/tsad/" in sequence.segment.uri or super().should_filter_sequence(sequence)
class AbemaTVHLSStreamReader(HLSStreamReader):
__writer__ = AbemaTVHLSStreamWriter
class AbemaTVHLSStream(HLSStream):
__reader__ = AbemaTVHLSStreamReader
class AbemaTVLicenseAdapter(BaseAdapter):
'''
Handling abematv-license:// protocol to get real video key_data.
'''
STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
_MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
_LICENSE_API = "https://license.abema.io/abematv-hls"
_MEDIATOKEN_SCHEMA = validate.Schema({"token": validate.text})
_LICENSE_SCHEMA = validate.Schema({"k": validate.text,
"cid": validate.text})
def __init__(self, session, deviceid, usertoken):
self._session = session
self.deviceid = deviceid
self.usertoken = usertoken
super().__init__()
def _get_videokey_from_ticket(self, ticket):
params = {
"osName": "android",
"osVersion": "6.0.1",
"osLang": "ja_JP",
"osTimezone": "Asia/Tokyo",
"appId": "tv.abema",
"appVersion": "3.27.1"
}
auth_header = {"Authorization": "Bearer " + self.usertoken}
res = self._session.http.get(self._MEDIATOKEN_API, params=params,
headers=auth_header)
jsonres = self._session.http.json(res,
schema=self._MEDIATOKEN_SCHEMA)
mediatoken = jsonres['token']
res = self._session.http.post(self._LICENSE_API,
params={"t": mediatoken},
json={"kv": "a", "lt": ticket})
jsonres = self._session.http.json(res,
schema=self._LICENSE_SCHEMA)
cid = jsonres['cid']
k = jsonres['k']
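        # STRTABLE holds 58 symbols (digits and letters minus 0/I/O/l), so
        # `k` decodes below as a big-endian base58 integer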
res = sum([self.STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i))
for i in range(len(k))])
encvideokey = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
# HKEY:
# RC4KEY = unhexlify('DB98A8E7CECA3424D975280F90BD03EE')
# RC4DATA = unhexlify(b'D4B718BBBA9CFB7D0192A58F9E2D146A'
# b'FC5DB29E4352DE05FC4CF2C1005804BB')
# rc4 = ARC4.new(RC4KEY)
# HKEY = rc4.decrypt(RC4DATA)
h = hmac.new(unhexlify(self.HKEY),
(cid + self.deviceid).encode("utf-8"),
digestmod=hashlib.sha256)
enckey = h.digest()
aes = AES.new(enckey, AES.MODE_ECB)
rawvideokey = aes.decrypt(encvideokey)
return rawvideokey
def send(self, request, stream=False, timeout=None, verify=True, cert=None,
proxies=None):
resp = Response()
resp.status_code = 200
ticket = re.findall(r"abematv-license://(.*)", request.url)[0]
resp._content = self._get_videokey_from_ticket(ticket)
return resp
def close(self):
return
@pluginmatcher(re.compile(r"""
https?://abema\.tv/(
now-on-air/(?P<onair>[^?]+)
|
video/episode/(?P<episode>[^?]+)
|
channels/.+?/slots/(?P<slots>[^?]+)
)
""", re.VERBOSE))
class AbemaTV(Plugin):
_CHANNEL = "https://api.abema.io/v1/channels"
_USER_API = "https://api.abema.io/v1/users"
_PRGM_API = "https://api.abema.io/v1/video/programs/{0}"
_SLOTS_API = "https://api.abema.io/v1/media/slots/{0}"
_PRGM3U8 = "https://vod-abematv.akamaized.net/program/{0}/playlist.m3u8"
_SLOTM3U8 = "https://vod-abematv.akamaized.net/slot/{0}/playlist.m3u8"
SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
b"k9cD=3TxwWe86!x#Zyhe")
_USER_SCHEMA = validate.Schema({"profile": {"userId": validate.text},
"token": validate.text})
_CHANNEL_SCHEMA = validate.Schema({"channels": [{"id": validate.text,
"name": validate.text,
"playback": {validate.optional("dash"):
validate.text,
"hls": validate.text}}]})
_PRGM_SCHEMA = validate.Schema({"terms": [{validate.optional("onDemandType"): int}]})
_SLOT_SCHEMA = validate.Schema({"slot": {"flags": {validate.optional("timeshiftFree"): bool}}})
def __init__(self, url):
super().__init__(url)
self.session.http.headers.update({'User-Agent': useragents.CHROME})
def _generate_applicationkeysecret(self, deviceid):
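        # Derives the secret by chaining HMAC-SHA256 digests keyed with
        # SECRETKEY, mixing in the device id and the current time rounded
        # up to the next full hour; month, day and hour of that timestamp
        # determine how often the digest is re-hashed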
deviceid = deviceid.encode("utf-8") # for python3
# plus 1 hour and drop minute and secs
# for python3 : floor division
ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
time_struct = time.gmtime(ts_1hour)
ts_1hour_str = str(ts_1hour).encode("utf-8")
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(self.SECRETKEY)
tmp = h.digest()
for i in range(time_struct.tm_mon):
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
tmp = h.digest()
for i in range(time_struct.tm_mday % 5):
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
tmp = h.digest()
for i in range(time_struct.tm_hour % 5): # utc hour
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
return urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
def _is_playable(self, vtype, vid):
auth_header = {"Authorization": "Bearer " + self.usertoken}
if vtype == "episode":
res = self.session.http.get(self._PRGM_API.format(vid),
headers=auth_header)
jsonres = self.session.http.json(res, schema=self._PRGM_SCHEMA)
playable = False
for item in jsonres["terms"]:
if item.get("onDemandType", False) == 3:
playable = True
return playable
elif vtype == "slots":
res = self.session.http.get(self._SLOTS_API.format(vid),
headers=auth_header)
jsonres = self.session.http.json(res, schema=self._SLOT_SCHEMA)
return jsonres["slot"]["flags"].get("timeshiftFree", False) is True
def _get_streams(self):
deviceid = str(uuid.uuid4())
appkeysecret = self._generate_applicationkeysecret(deviceid)
json_data = {"deviceId": deviceid,
"applicationKeySecret": appkeysecret}
res = self.session.http.post(self._USER_API, json=json_data)
jsonres = self.session.http.json(res, schema=self._USER_SCHEMA)
        self.usertoken = jsonres['token']  # for authorization
matchresult = self.match
if matchresult.group("onair"):
onair = matchresult.group("onair")
if onair == "news-global":
self._CHANNEL = update_qsd(self._CHANNEL, {"division": "1"})
res = self.session.http.get(self._CHANNEL)
jsonres = self.session.http.json(res, schema=self._CHANNEL_SCHEMA)
channels = jsonres["channels"]
for channel in channels:
if onair == channel["id"]:
break
else:
raise NoStreamsError(self.url)
playlisturl = channel["playback"]["hls"]
elif matchresult.group("episode"):
episode = matchresult.group("episode")
if not self._is_playable("episode", episode):
log.error("Premium stream is not playable")
return {}
playlisturl = self._PRGM3U8.format(episode)
elif matchresult.group("slots"):
slots = matchresult.group("slots")
if not self._is_playable("slots", slots):
log.error("Premium stream is not playable")
return {}
playlisturl = self._SLOTM3U8.format(slots)
log.debug("URL={0}".format(playlisturl))
# hook abematv private protocol
self.session.http.mount("abematv-license://",
AbemaTVLicenseAdapter(self.session, deviceid,
self.usertoken))
return AbemaTVHLSStream.parse_variant_playlist(self.session, playlisturl)
__plugin__ = AbemaTV
|
|
import pytest
import math
import time
import numpy as np
import numpy.linalg as la
from pyctrl.block.clock import Clock, TimerClock
from pyctrl.block import Map, Constant, Logger
from pyctrl.block.system import TimeVaryingSystem
test_ode = True
try:
from pyctrl.system.ode import ODE, ODEINT
except ImportError:
test_ode = False
def test1():
if not test_ode:
return
from pyctrl import Controller
controller = Controller()
Ts = 0.01
clock = TimerClock(period = Ts)
controller.add_source('clock',clock,['clock'])
a = -1
b = 1
def f(t, x, u, a, b):
return a * x + b * u
t0 = 0
uk = 1
x0 = np.array([0])
sys = ODE((1,1,1), f, x0 = x0, t0 = t0, pars = (a,b))
controller.add_signals('input','output')
controller.add_filter('condition',
Map(function = lambda x: x < 1),
['clock'], ['is_running'])
controller.add_filter('ode',TimeVaryingSystem(model = sys),['clock','input'],['output'])
controller.add_sink('logger',Logger(),['clock','output'])
print(controller.info('all'))
controller.set_filter('ode', reset = True)
controller.set_source('clock', reset = True)
controller.set_sink('logger', reset = True)
controller.set_signal('input',uk)
controller.run()
xk = sys.state
log = controller.get_sink('logger','log')
t0 = log['clock'][0,0]
tk = log['clock'][-1,0]
yk = log['output'][-1,0]
print('t0 = {}'.format(t0))
print('tk = {}'.format(tk))
print('yk = {}'.format(yk))
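    # closed-form solution of xdot = a*x + b*u for a = -1, b = 1 and a
    # constant input uk, used as the reference for the numerical solution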
yyk = uk * (1 - math.exp(a*(tk-t0))) + x0[0] * math.exp(a*(tk-t0))
print(log)
print(t0, x0, tk, xk, yk, yyk)
assert np.abs(yk - yyk) < 1e-2
uk = 0
x0 = sys.state
controller.add_filter('condition',
Map(function = lambda x: x < 2),
['clock'], ['is_running'])
print(controller.info('all'))
print('clock = {}'.format(controller.get_signal('clock')))
#controller.set_source('clock', reset = True)
controller.set_sink('logger', reset = True)
controller.set_signal('input',uk)
controller.run()
xk = sys.state
print('clock = {}'.format(controller.get_signal('clock')))
log = controller.get_sink('logger','log')
print('log = {}'.format(log))
t0 = log['clock'][0,0]
tk = log['clock'][-1,0]
yk = log['output'][-1,0]
print('t0 = {}, x0 = {}, tk = {}, xk = {}, yk = {}'.format(t0, x0, tk, xk, yk))
yyk = uk * (1 - math.exp(a*(tk-t0))) + x0 * math.exp(a*(tk-t0))
print(log)
print(t0, x0, tk, xk, yk, yyk)
assert np.abs(yk - np.array([yyk])) < 1e-2
uk = -1
x0 = sys.state
controller.add_filter('condition',
Map(function = lambda x: x < 3),
['clock'], ['is_running'])
#controller.set_source('clock', reset = True)
controller.set_sink('logger', reset = True)
controller.set_signal('input',uk)
controller.run()
xk = sys.state
log = controller.get_sink('logger','log')
t0 = log['clock'][0,0]
tk = log['clock'][-1,0]
yk = log['output'][-1,0]
yyk = uk * (1 - math.exp(a*(tk-t0))) + x0 * math.exp(a*(tk-t0))
print(t0, x0, tk, xk, yk, yyk)
assert np.abs(yk - np.array([yyk])) < 1e-2
clock.set_enabled(False)
def test2():
if not test_ode:
return
m1 = 30/1000
l1 = 7.6/100
r1 = (5-(10-7.6)/2)/100
w1 = 10/100
d1 = 2.4/100
J1 = m1 * (w1**2 + d1**2) / 12
m2 = 44/1000
w2 = 25.4/100
d2 = 2.4/100
J2 = m2 * (w2**2 + d2**2) / 12
r2 = (25.4/2-1.25)/100
Jm = 0.004106
km = 0.006039
bm = 0.091503
g = 9.8
bPhi = 0
bTheta = 0
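    # MK returns the mass matrix M(x) and generalized force vector K(x, u)
    # of the two-mass pendulum model; ff below integrates M * qddot = -K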
def MK(x,u):
theta, phi, thetaDot, phiDot = x
return (np.array([[J2+m2*r2**2, m2*r2*l1*math.cos(theta-phi)],
[m2*r2*l1*math.cos(theta-phi), J1+Jm+m1*r1**2+m2*l1**2]]),
np.array([bTheta*thetaDot+m2*r2*(g*math.sin(theta)+l1*math.sin(theta-phi)*phiDot**2),
g*(m1*r1+m2*l1)*math.sin(phi)-m2*r2*l1*math.sin(theta-phi)*thetaDot**2+(bm+bPhi)*phiDot-km*u[0]]))
def ff(t, x, u):
M, K = MK(x,u)
return np.hstack((x[2:4], -la.solve(M,K)))
theta0, phi0 = 0+math.pi/6, 0
t0, x0, u0 = 0, np.array([theta0,phi0,0,0]), [0]
M,K = MK(x0,u0)
print(M)
print(K)
print(ff(t0,x0,u0))
sys = ODE(shape = (1,4,4), t0 = t0, x0 = x0, f = ff)
tk = 5
uk = [0]
yk = sys.update(tk, uk)
print('1. [{:3.2f}, {:3.2f}] = {}'.format(t0, tk, yk))
from pyctrl import Controller
controller = Controller()
Ts = 0.01
controller.add_source('clock',Clock(),['clock'])
condition = Map(function = lambda t : t < T)
controller.add_filter('condition',condition,['clock'],['is_running'])
controller.add_signals('tau','x')
controller.add_filter('ode',
TimeVaryingSystem(model = ODE(shape = (1,4,4), t0 = t0, x0 = x0, f = ff)),
['clock','tau'], ['x'])
controller.add_sink('logger',Logger(),['clock','x'])
controller.set_source('clock',reset=True)
T = 5 + Ts
controller.run()
log = controller.get_sink('logger','log')
    t0 = log['clock'][0,0]
    tk = log['clock'][-1,0]
    yk = log['x'][-1,:]
# yk = log[-1,1:]
print('2. [{:3.2f}, {:3.2f}] = {}'.format(t0, tk, yk))
import control
fc = 7
wc = 2 * math.pi * fc
lpf = control.tf(wc,[1,wc])
ctr = -2*100
def gg(t, x, u):
return [x[0]]
Ts = 0.01
Ac, Bc, Cc, Dc = map(np.array, control.ssdata(control.ss(lpf * ctr)))
nc = Ac.shape[0]
def F(t, x, ref):
x, xc = x[0:4], x[4:4+nc]
y = ref - gg(t,x,[0])
u = max(-100,min(100,Cc.dot(xc)+Dc.dot(y)))
#print(ff(t,x,u))
return np.hstack((ff(t,x,u), Ac.dot(xc)+Bc.dot(y)))
eta = 0
kappa = 0
ref = np.array([eta * math.pi])
theta0 = -20*math.pi/180
xx0 = [kappa*math.pi-theta0,eta*math.pi,0,0]
xc0 = np.zeros((nc,))
x0 = np.hstack((xx0,xc0))
t0 = 0
print('F = {}'.format(F(t0, x0, ref)))
sys = ODE(shape = (1,4,4), t0 = t0, x0 = x0, f = F)
tk = 1
uk = np.array([0])
yk = sys.update(tk, uk)
print('1. [{:3.2f}, {:3.2f}] = {}'.format(t0, tk, yk))
controller.reset()
Ts = 0.01
controller.add_source('clock',Clock(),['clock'])
condition = Map(function = lambda t : t < T)
controller.add_filter('condition',condition,['clock'],['is_running'])
controller.add_signals('ref','x')
controller.add_filter('ode',
TimeVaryingSystem(model = ODE(shape = (1,4,4), t0 = t0, x0 = x0, f = F)),
['clock','ref'], ['x'])
controller.add_sink('logger',Logger(),['clock','x'])
#print(controller.info('all'))
controller.set_source('clock',reset=True)
controller.set_signal('ref',ref)
T = 1 + Ts
controller.run()
log = controller.get_sink('logger','log')
    t0 = log['clock'][0,0]
    tk = log['clock'][-1,0]
    yk = log['x'][-1,:]
#yk = log[-1,1:]
print('2. [{:3.2f}, {:3.2f}] = {}'.format(t0, tk, yk))
|
|
#! /usr/bin/env python3.7
import logging
import queue
import socket
import ssl
import threading
import time
from typing import Any, Dict, List, Set
PASSTHROUGH_ACTIONS = (
"PRIVMSG",
"NOTICE",
"USERSTATE",
"GLOBALUSERSTATE",
"HOSTTARGET",
"CLEARCHAT",
"JOIN",
"PART",
"MODE",
"RECONNECT",
"ROOMSTATE",
"CAP",
"USERNOTICE"
)
class IRC:
def __init__(self,
host: str,
port: int,
username: str,
oauth="",
use_ssl=False,
callback=None,
max_worker_threads=5) -> None:
self.host = host
self.port = port
self.use_ssl = use_ssl
self.username = username
self.oauth = oauth
self.socket = socket.socket()
self.connected = False
self.channels: Set[str] = set()
self.capabilities: Set[str] = set()
self.continue_loop = True
self.create()
self.callback = callback
self.msg_queue: queue.Queue = queue.Queue()
self.msg_queue_size = 0
self.logger = logging.getLogger("IRC.IRC")
self.worker_thread = threading.Thread(target=self.msg_worker)
self.worker_thread.daemon = True
self.worker_thread.start()
if max_worker_threads < 0:
max_worker_threads = 0
self.max_worker_threads = max_worker_threads
self.tmp_threads = 0
def create(self) -> None:
self.socket = socket.socket()
self.socket.settimeout(600)
if self.use_ssl:
self.socket = ssl.wrap_socket(self.socket)
def connect(self) -> None:
sleep_time = 2
while True:
try:
self.socket.connect((self.host, self.port))
except ValueError as e:
self.logger.error("Tried to connect to already connected socket!")
self.disconnect()
self.create()
continue
except Exception as e:
self.logger.exception(e)
time.sleep(sleep_time)
sleep_time = sleep_time ** 2
continue
self.capability(" ".join(self.capabilities))
if self.oauth:
self.raw("PASS {0}".format(self.oauth))
self.raw("NICK {0}".format(self.username))
time.sleep(.5)
self.connected = True
if self.channels:
for i in self.channels:
self.join(i)
time.sleep(.2)
break
def disconnect(self) -> None:
try:
self.quit()
except Exception:
pass
finally:
self.socket.close()
self.connected = False
self.continue_loop = False
def reconnect(self) -> None:
self.disconnect()
self.create()
self.connect()
def raw(self, msg) -> None:
self.socket.sendall("{0}\r\n".format(msg).encode("utf-8"))
def ping(self) -> None:
self.raw("PING")
def pong(self, host: str) -> None:
self.raw("PONG {0}".format(host))
def join(self, channel: str) -> None:
self.raw("JOIN #{0}".format(channel))
self.channels.add(channel)
def part(self, channel: str) -> None:
self.raw("PART #{0}".format(channel))
self.channels.remove(channel)
def quit(self) -> None:
self.raw("QUIT")
def capability(self, cap: str) -> None:
self.raw("CAP REQ :{0}".format(cap))
def privmsg(self, channel: str, msg: str) -> None:
self.raw("PRIVMSG #{0} :{1}".format(channel, msg))
def pm(self, user: str, msg: str) -> None:
self.raw("PRIVMSG {0} :{1}".format(user, msg))
    def get_peer_ip(self) -> str:
return self.socket.getpeername()[0]
def recv(self, amount: int) -> str:
inc_msg = self.socket.recv(amount)
return inc_msg.decode("utf-8")
@staticmethod
def parse(msg: str) -> Dict[str, Any]:
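        # Parses one IRC line of the (IRCv3) form
        #   [@tags] [:prefix] <action> [params] [:trailing]
        # where the tags and prefix parts are optional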
position = 0
next_space = 0
c_msg: Dict[str, Any] = {
"raw": msg,
"tags": {},
"prefix": None,
"action": None,
"params": []
}
msg = msg.strip("\r\n")
if msg[position] == "@":
next_space = msg.find(" ")
c_msg["tags"] = IRC.process_tags(msg[:next_space])
position = next_space + 1
while msg[position] == " ":
position += 1
if msg[position] == ":":
next_space = msg.find(" ", position)
c_msg["prefix"] = msg[position: next_space]
position = next_space + 1
            while msg[position] == " ":
position += 1
next_space = msg.find(" ", position)
if next_space == -1:
if len(msg) > position:
c_msg["action"] = msg[position:]
return c_msg
c_msg["action"] = msg[position: next_space]
position = next_space + 1
while msg[position] == " ":
position += 1
while position < len(msg):
next_space = msg.find(" ", position)
if msg[position] == ":":
c_msg["params"].append(msg[position + 1:])
break
if next_space != -1:
c_msg["params"].append(msg[position: next_space])
position = next_space + 1
while msg[position] == " ":
position += 1
continue
if next_space == -1:
c_msg["params"].append(msg[position:])
break
return c_msg
@staticmethod
def process_tags(tags: str) -> Dict[str, str]:
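        # Splits "@k1=v1;k2=v2" into a dict, then undoes the IRCv3 tag
        # escapes (\: -> ';', \s -> ' ', \\ -> '\')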
tags_dict: Dict[str, str] = {}
for item in tags[1:].split(";"):
tmp = item.split("=")
tags_dict[tmp[0]] = tmp[1]
escaped_dict = {}
for k, v in tags_dict.items():
new_k = k.replace("\\:", ";").replace("\\s", " ").replace("\\\\", "\\")
new_v = v.replace("\\:", ";").replace("\\s", " ").replace("\\\\", "\\")
escaped_dict[new_k] = new_v
return escaped_dict
def msg_worker(self) -> None:
while self.continue_loop:
            if self.msg_queue_size > 5 and self.tmp_threads < self.max_worker_threads:
self.logger.info("Spawning worker thread.")
t = threading.Thread(target=self.tmp_msg_worker)
t.daemon = True
t.start()
self.tmp_threads += 1
try:
self.callback(self.msg_queue.get())
except Exception as e:
self.logger.exception(e)
self.msg_queue_size -= 1
self.msg_queue.task_done()
@staticmethod
def extra_parse(msg: str) -> Dict[str, Any]:
c_msg = IRC.parse(msg)
if c_msg["action"] in ("PRIVMSG", "NOTICE", "HOSTTARGET"):
c_msg["message"] = c_msg["params"][1]
c_msg["sender"] = c_msg["prefix"].split("!")[0][1:]
if c_msg["action"] in ("ROOMSTATE", "USERSTATE", "JOIN", "PRIVMSG", "NOTICE", "HOSTTARGET", "CLEARCHAT", "USERNOTICE", "JOIN", "PART", "MODE"):
c_msg["channel"] = c_msg["params"][0]
return c_msg
def tmp_msg_worker(self) -> None:
while True:
try:
self.callback(self.msg_queue.get(False))
self.msg_queue_size -= 1
self.msg_queue.task_done()
except queue.Empty:
break
self.tmp_threads -= 1
return
def main_loop(self) -> None:
msg_buffer = ""
lines: List[str] = []
while self.continue_loop:
try:
tmp_buffer = self.recv(4096)
except socket.timeout:
if self.continue_loop:
self.reconnect()
continue
except Exception as e:
# Assume connection is trashed
if self.continue_loop:
self.reconnect()
continue
self.timeout_count = 0
if tmp_buffer == "":
if self.continue_loop:
self.logger.info("Connection for {0} reset, reconnecting.".format(self.username))
self.reconnect()
continue
msg_buffer += tmp_buffer
lines += msg_buffer.split("\r\n")
while len(lines) > 1:
current_message = lines.pop(0)
if current_message == "":
continue
msg_parts = self.extra_parse(current_message)
if msg_parts["action"] == "PING":
self.pong(msg_parts["params"][0])
continue
msg_parts["bot_name"] = self.username
if msg_parts["action"] == "CAP":
if msg_parts["params"][1] == "ACK":
for i in msg_parts["params"][2].split(" "):
self.capabilities.add(i)
elif msg_parts["params"][1] == "NAK":
for i in msg_parts["params"][2].split(" "):
if i in self.capabilities:
self.capabilities.remove(i)
if self.callback:
if msg_parts["action"] in PASSTHROUGH_ACTIONS:
self.msg_queue.put(msg_parts)
self.msg_queue_size += 1
else:
raise ValueError("Callback should be defined before calling main_loop.")
if lines[0] != "":
msg_buffer = lines[0]
else:
msg_buffer = lines.pop(0)
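# A minimal usage sketch, as comments (host, credentials and channel are
# placeholders):
#
#   irc = IRC("irc.chat.twitch.tv", 6667, "mybot",
#             oauth="oauth:...", callback=print)
#   irc.capabilities.add("twitch.tv/tags")  # requested during connect()
#   irc.connect()
#   irc.join("somechannel")
#   irc.main_loop()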
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import index
from google.cloud.aiplatform_v1beta1.types import index_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class IndexServiceTransport(abc.ABC):
"""Abstract transport class for IndexService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
        # If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_index: gapic_v1.method.wrap_method(
self.create_index, default_timeout=5.0, client_info=client_info,
),
self.get_index: gapic_v1.method.wrap_method(
self.get_index, default_timeout=5.0, client_info=client_info,
),
self.list_indexes: gapic_v1.method.wrap_method(
self.list_indexes, default_timeout=5.0, client_info=client_info,
),
self.update_index: gapic_v1.method.wrap_method(
self.update_index, default_timeout=5.0, client_info=client_info,
),
self.delete_index: gapic_v1.method.wrap_method(
self.delete_index, default_timeout=5.0, client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_index(
self,
) -> Callable[
[index_service.CreateIndexRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_index(
self,
) -> Callable[
[index_service.GetIndexRequest], Union[index.Index, Awaitable[index.Index]]
]:
raise NotImplementedError()
@property
def list_indexes(
self,
) -> Callable[
[index_service.ListIndexesRequest],
Union[
index_service.ListIndexesResponse,
Awaitable[index_service.ListIndexesResponse],
],
]:
raise NotImplementedError()
@property
def update_index(
self,
) -> Callable[
[index_service.UpdateIndexRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_index(
self,
) -> Callable[
[index_service.DeleteIndexRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("IndexServiceTransport",)
|
|
#!/usr/bin/env python
#
# Created by: Pearu Peterson, April 2002
#
__usage__ = """
Build linalg:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.lib.blas.test()'
"""
import math
from numpy import array
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_almost_equal, TestCase, run_module_suite
from scipy.lib.blas import fblas
from scipy.lib.blas import cblas
from scipy.lib.blas import get_blas_funcs
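# BLAS routine prefixes: s/d = single-/double-precision real,
# c/z = single-/double-precision complex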
class TestCBLAS1Simple(TestCase):
def test_axpy(self):
for p in 'sd':
f = getattr(cblas,p+'axpy',None)
if f is None: continue
assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18])
for p in 'cz':
f = getattr(cblas,p+'axpy',None)
if f is None: continue
assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18])
class TestFBLAS1Simple(TestCase):
def test_axpy(self):
for p in 'sd':
f = getattr(fblas,p+'axpy',None)
if f is None: continue
assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18])
for p in 'cz':
f = getattr(fblas,p+'axpy',None)
if f is None: continue
assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18])
def test_copy(self):
for p in 'sd':
f = getattr(fblas,p+'copy',None)
if f is None: continue
assert_array_almost_equal(f([3,4,5],[8]*3),[3,4,5])
for p in 'cz':
f = getattr(fblas,p+'copy',None)
if f is None: continue
assert_array_almost_equal(f([3,4j,5+3j],[8]*3),[3,4j,5+3j])
def test_asum(self):
for p in 'sd':
f = getattr(fblas,p+'asum',None)
if f is None: continue
assert_almost_equal(f([3,-4,5]),12)
for p in ['sc','dz']:
f = getattr(fblas,p+'asum',None)
if f is None: continue
assert_almost_equal(f([3j,-4,3-4j]),14)
def test_dot(self):
for p in 'sd':
f = getattr(fblas,p+'dot',None)
if f is None: continue
assert_almost_equal(f([3,-4,5],[2,5,1]),-9)
for p in 'cz':
f = getattr(fblas,p+'dotu',None)
if f is None: continue
assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j)
f = getattr(fblas,p+'dotc')
assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j)
def test_nrm2(self):
for p in 'sd':
f = getattr(fblas,p+'nrm2',None)
if f is None: continue
assert_almost_equal(f([3,-4,5]),math.sqrt(50))
for p in ['sc','dz']:
f = getattr(fblas,p+'nrm2',None)
if f is None: continue
assert_almost_equal(f([3j,-4,3-4j]),math.sqrt(50))
def test_scal(self):
for p in 'sd':
f = getattr(fblas,p+'scal',None)
if f is None: continue
assert_array_almost_equal(f(2,[3,-4,5]),[6,-8,10])
for p in 'cz':
f = getattr(fblas,p+'scal',None)
if f is None: continue
assert_array_almost_equal(f(3j,[3j,-4,3-4j]),[-9,-12j,12+9j])
for p in ['cs','zd']:
f = getattr(fblas,p+'scal',None)
if f is None: continue
assert_array_almost_equal(f(3,[3j,-4,3-4j]),[9j,-12,9-12j])
def test_swap(self):
for p in 'sd':
f = getattr(fblas,p+'swap',None)
if f is None: continue
x,y = [2,3,1],[-2,3,7]
x1,y1 = f(x,y)
assert_array_almost_equal(x1,y)
assert_array_almost_equal(y1,x)
for p in 'cz':
f = getattr(fblas,p+'swap',None)
if f is None: continue
x,y = [2,3j,1],[-2,3,7-3j]
x1,y1 = f(x,y)
assert_array_almost_equal(x1,y)
assert_array_almost_equal(y1,x)
def test_amax(self):
for p in 'sd':
f = getattr(fblas,'i'+p+'amax')
assert_equal(f([-2,4,3]),1)
for p in 'cz':
f = getattr(fblas,'i'+p+'amax')
assert_equal(f([-5,4+3j,6]),1)
#XXX: need tests for rot,rotm,rotg,rotmg
class TestFBLAS2Simple(TestCase):
def test_gemv(self):
for p in 'sd':
f = getattr(fblas,p+'gemv',None)
if f is None: continue
assert_array_almost_equal(f(3,[[3]],[-4]),[-36])
assert_array_almost_equal(f(3,[[3]],[-4],3,[5]),[-21])
for p in 'cz':
f = getattr(fblas,p+'gemv',None)
if f is None: continue
assert_array_almost_equal(f(3j,[[3-4j]],[-4]),[-48-36j])
assert_array_almost_equal(f(3j,[[3-4j]],[-4],3,[5j]),[-48-21j])
def test_ger(self):
for p in 'sd':
f = getattr(fblas,p+'ger',None)
if f is None: continue
assert_array_almost_equal(f(1,[1,
2],[3,4]),[[3,4],[6,8]])
assert_array_almost_equal(f(2,[1,
2,
3],[3,4]),[[6,8],[12,16],[18,24]])
assert_array_almost_equal(f(1,[1,
2],[3,4],
a=[[1,2],[3,4]]
),[[4,6],[9,12]])
for p in 'cz':
f = getattr(fblas,p+'geru',None)
if f is None: continue
assert_array_almost_equal(f(1,[1j,
2],[3,4]),[[3j,4j],[6,8]])
assert_array_almost_equal(f(-2,[1j,
2j,
3j],[3j,4j]),[[6,8],[12,16],[18,24]])
for p in 'cz':
f = getattr(fblas,p+'gerc',None)
if f is None: continue
assert_array_almost_equal(f(1,[1j,
2],[3,4]),[[3j,4j],[6,8]])
assert_array_almost_equal(f(2,[1j,
2j,
3j],[3j,4j]),[[6,8],[12,16],[18,24]])
class TestFBLAS3Simple(TestCase):
def test_gemm(self):
for p in 'sd':
f = getattr(fblas,p+'gemm',None)
if f is None: continue
assert_array_almost_equal(f(3,[3],[-4]),[[-36]])
assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21])
assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]])
for p in 'cz':
f = getattr(fblas,p+'gemm',None)
if f is None: continue
assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]])
assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j])
assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]])
def test_gemm2(self):
for p in 'sdcz':
f = getattr(fblas,p+'gemm',None)
if f is None: continue
assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[[11]])
assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
class TestBLAS(TestCase):
def test_blas(self):
a = array([[1,1,1]])
b = array([[1],[1],[1]])
gemm, = get_blas_funcs(('gemm',),(a,b))
assert_array_almost_equal(gemm(1,a,b),[[3]],15)
if __name__ == "__main__":
run_module_suite()
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_utils import encodeutils
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
if sys.version_info > (3,):
long = int
class GuestTestCase(test.NoDBTestCase):
def setUp(self):
super(GuestTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.host = host.Host("qemu:///system")
self.context = context.get_admin_context()
self.domain = mock.Mock(spec=fakelibvirt.virDomain)
self.guest = libvirt_guest.Guest(self.domain)
def test_repr(self):
self.domain.ID.return_value = 99
self.domain.UUIDString.return_value = "UUID"
self.domain.name.return_value = "foo"
self.assertEqual("<Guest 99 foo UUID>", repr(self.guest))
@mock.patch.object(fakelibvirt.Connection, 'defineXML')
def test_create(self, mock_define):
libvirt_guest.Guest.create("xml", self.host)
mock_define.assert_called_once_with("xml")
@mock.patch.object(fakelibvirt.Connection, 'defineXML')
def test_create_exception(self, mock_define):
mock_define.side_effect = test.TestingException
self.assertRaises(test.TestingException,
libvirt_guest.Guest.create,
"foo", self.host)
def test_launch(self):
self.guest.launch()
self.domain.createWithFlags.assert_called_once_with(0)
def test_launch_and_pause(self):
self.guest.launch(pause=True)
self.domain.createWithFlags.assert_called_once_with(
fakelibvirt.VIR_DOMAIN_START_PAUSED)
def test_shutdown(self):
self.domain.shutdown = mock.MagicMock()
self.guest.shutdown()
self.domain.shutdown.assert_called_once_with()
@mock.patch.object(encodeutils, 'safe_decode')
def test_launch_exception(self, mock_safe_decode):
self.domain.createWithFlags.side_effect = test.TestingException
mock_safe_decode.return_value = "</xml>"
self.assertRaises(test.TestingException, self.guest.launch)
self.assertEqual(1, mock_safe_decode.called)
@mock.patch.object(utils, 'execute')
@mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
def test_enable_hairpin(self, mock_get_interfaces, mock_execute):
mock_get_interfaces.return_value = ["vnet0", "vnet1"]
self.guest.enable_hairpin()
mock_execute.assert_has_calls([
mock.call(
'tee', '/sys/class/net/vnet0/brport/hairpin_mode',
run_as_root=True, process_input='1', check_exit_code=[0, 1]),
mock.call(
'tee', '/sys/class/net/vnet1/brport/hairpin_mode',
run_as_root=True, process_input='1', check_exit_code=[0, 1])])
@mock.patch.object(encodeutils, 'safe_decode')
@mock.patch.object(utils, 'execute')
@mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
def test_enable_hairpin_exception(self, mock_get_interfaces,
mock_execute, mock_safe_decode):
mock_get_interfaces.return_value = ["foo"]
mock_execute.side_effect = test.TestingException('oops')
self.assertRaises(test.TestingException, self.guest.enable_hairpin)
self.assertEqual(1, mock_safe_decode.called)
def test_get_interfaces(self):
self.domain.XMLDesc.return_value = """<domain>
<devices>
<interface type="network">
<target dev="vnet0"/>
</interface>
<interface type="network">
<target dev="vnet1"/>
</interface>
</devices>
</domain>"""
self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces())
def test_get_interfaces_exception(self):
self.domain.XMLDesc.return_value = "<bad xml>"
self.assertEqual([], self.guest.get_interfaces())
def test_poweroff(self):
self.guest.poweroff()
self.domain.destroy.assert_called_once_with()
def test_resume(self):
self.guest.resume()
self.domain.resume.assert_called_once_with()
@mock.patch('time.time', return_value=1234567890.125)
def test_time_sync_no_errors(self, time_mock):
self.domain.setTime.side_effect = fakelibvirt.libvirtError('error')
self.guest.sync_guest_time()
self.domain.setTime.assert_called_once_with(time={
'nseconds': 125000000,
'seconds': 1234567890})
def test_get_vcpus_info(self):
self.domain.vcpus.return_value = ([(0, 1, int(10290000000), 2)],
[(True, True)])
vcpus = list(self.guest.get_vcpus_info())
self.assertEqual(0, vcpus[0].id)
self.assertEqual(2, vcpus[0].cpu)
self.assertEqual(1, vcpus[0].state)
self.assertEqual(int(10290000000), vcpus[0].time)
def test_delete_configuration(self):
self.guest.delete_configuration()
self.domain.undefineFlags.assert_called_once_with(
fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
def test_delete_configuration_exception(self):
self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError(
'oops')
self.domain.ID.return_value = 1
self.guest.delete_configuration()
self.domain.undefine.assert_called_once_with()
def test_attach_device(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.attach_device(conf)
self.domain.attachDeviceFlags.assert_called_once_with(
"</xml>", flags=0)
def test_attach_device_persistent(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.attach_device(conf, persistent=True)
self.domain.attachDeviceFlags.assert_called_once_with(
"</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
def test_attach_device_live(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.attach_device(conf, live=True)
self.domain.attachDeviceFlags.assert_called_once_with(
"</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
def test_attach_device_persistent_live(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.attach_device(conf, persistent=True, live=True)
self.domain.attachDeviceFlags.assert_called_once_with(
"</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_device(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.detach_device(conf)
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=0)
def test_detach_device_persistent(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.detach_device(conf, persistent=True)
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
def test_detach_device_live(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.detach_device(conf, live=True)
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
def test_detach_device_persistent_live(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
self.guest.detach_device(conf, persistent=True, live=True)
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_device_with_retry_detach_success(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
get_config = mock.Mock()
# Force multiple retries of detach
get_config.side_effect = [conf, conf, conf, None]
dev_path = "/dev/vdb"
retry_detach = self.guest.detach_device_with_retry(
get_config, dev_path, persistent=True, live=True,
inc_sleep_time=.01)
# Ensure we've only done the initial detach call
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
get_config.assert_called_with(dev_path)
# Some time later, we can do the wait/retry to ensure detach succeeds
self.domain.detachDeviceFlags.reset_mock()
retry_detach()
# Should have two retries before we pretend device is detached
self.assertEqual(2, self.domain.detachDeviceFlags.call_count)
def test_detach_device_with_retry_detach_failure(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
# Continue to return some value for the disk config
get_config = mock.Mock(return_value=conf)
retry_detach = self.guest.detach_device_with_retry(
get_config, "/dev/vdb", persistent=True, live=True,
inc_sleep_time=.01, max_retry_count=3)
# Ensure we've only done the initial detach call
self.domain.detachDeviceFlags.assert_called_once_with(
"</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
# Some time later, we can do the wait/retry to ensure detach
self.domain.detachDeviceFlags.reset_mock()
# Should hit max # of retries
self.assertRaises(exception.DeviceDetachFailed, retry_detach)
self.assertEqual(4, self.domain.detachDeviceFlags.call_count)
def test_detach_device_with_retry_device_not_found(self):
get_config = mock.Mock(return_value=None)
self.assertRaises(
exception.DeviceNotFound, self.guest.detach_device_with_retry,
get_config, "/dev/vdb", persistent=True, live=True)
@mock.patch.object(libvirt_guest.Guest, "detach_device")
def test_detach_device_with_retry_operation_failed(self, mock_detach):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
conf.to_xml.return_value = "</xml>"
get_config = mock.Mock(return_value=conf)
fake_device = "vdb"
fake_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
msg="invalid argument: no target device vdb",
error_code=fakelibvirt.VIR_ERR_OPERATION_FAILED,
error_message="disk vdb not found",
error_domain=fakelibvirt.VIR_FROM_DOMAIN)
mock_detach.side_effect = [None, fake_exc]
retry_detach = self.guest.detach_device_with_retry(
get_config, fake_device, persistent=True, live=True,
inc_sleep_time=.01, max_retry_count=3)
# Some time later, we can do the wait/retry to ensure detach
self.domain.detachDeviceFlags.reset_mock()
self.assertRaises(exception.DeviceNotFound, retry_detach)
def test_get_xml_desc(self):
self.guest.get_xml_desc()
self.domain.XMLDesc.assert_called_once_with(flags=0)
def test_get_xml_desc_dump_inactive(self):
self.guest.get_xml_desc(dump_inactive=True)
self.domain.XMLDesc.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE)
def test_get_xml_desc_dump_sensitive(self):
self.guest.get_xml_desc(dump_sensitive=True)
self.domain.XMLDesc.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_SECURE)
def test_get_xml_desc_dump_inactive_dump_sensitive(self):
self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
self.domain.XMLDesc.assert_called_once_with(
flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
fakelibvirt.VIR_DOMAIN_XML_SECURE))
def test_get_xml_desc_dump_migratable(self):
self.guest.get_xml_desc(dump_migratable=True)
self.domain.XMLDesc.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
def test_has_persistent_configuration(self):
self.assertTrue(
self.guest.has_persistent_configuration())
self.domain.isPersistent.assert_called_once_with()
def test_save_memory_state(self):
self.guest.save_memory_state()
self.domain.managedSave.assert_called_once_with(0)
def test_get_block_device(self):
disk = 'vda'
gblock = self.guest.get_block_device(disk)
self.assertEqual(disk, gblock._disk)
self.assertEqual(self.guest, gblock._guest)
def test_set_user_password(self):
self.guest.set_user_password("foo", "123")
self.domain.setUserPassword.assert_called_once_with("foo", "123", 0)
def test_get_devices(self):
xml = """
<domain type='qemu'>
<name>QEMUGuest1</name>
<uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
<memory unit='KiB'>219136</memory>
<currentMemory unit='KiB'>219136</currentMemory>
<vcpu placement='static'>1</vcpu>
<os>
<type arch='i686' machine='pc'>hvm</type>
<boot dev='hd'/>
</os>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu</emulator>
<disk type='block' device='disk'>
<driver name='qemu' type='raw'/>
<source dev='/dev/HostVG/QEMUGuest2'/>
<target dev='hda' bus='ide'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='raw'/>
<auth username='myname'>
<secret type='iscsi' usage='mycluster_myname'/>
</auth>
<source protocol='iscsi' name='iqn.1992-01.com.example'>
<host name='example.org' port='6000'/>
</source>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='raw'/>
<source protocol='iscsi' name='iqn.1992-01.com.example/1'>
<host name='example.org' port='6000'/>
</source>
<target dev='vdb' bus='virtio'/>
</disk>
<hostdev mode='subsystem' type='pci' managed='yes'>
<source>
<address domain='0x0000' bus='0x06' slot='0x12' function='0x5'/>
</source>
</hostdev>
<hostdev mode='subsystem' type='pci' managed='yes'>
<source>
<address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
</source>
</hostdev>
<interface type="bridge">
<mac address="fa:16:3e:f9:af:ae"/>
<model type="virtio"/>
<driver name="qemu"/>
<source bridge="qbr84008d03-11"/>
<target dev="tap84008d03-11"/>
</interface>
<controller type='usb' index='0'/>
<controller type='pci' index='0' model='pci-root'/>
<memballoon model='none'/>
</devices>
</domain>
"""
self.domain.XMLDesc.return_value = xml
devs = self.guest.get_all_devices()
# Only currently parse <disk>, <hostdev> and <interface> elements
# hence we're not counting the controller/memballoon
self.assertEqual(6, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev)
self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev)
self.assertIsInstance(devs[5], vconfig.LibvirtConfigGuestInterface)
devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk)
self.assertEqual(3, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
devs = self.guest.get_all_disks()
self.assertEqual(3, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestHostdev)
self.assertEqual(2, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev)
devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestInterface)
self.assertEqual(1, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestInterface)
cfg = vconfig.LibvirtConfigGuestInterface()
cfg.parse_str("""
<interface type="bridge">
<mac address="fa:16:3e:f9:af:ae"/>
<model type="virtio"/>
<driver name="qemu"/>
<source bridge="qbr84008d03-11"/>
<target dev="tap84008d03-11"/>
</interface>""")
self.assertIsNotNone(
self.guest.get_interface_by_cfg(cfg))
self.assertIsNone(self.guest.get_interface_by_cfg(None))
def test_get_info(self):
self.domain.info.return_value = (1, 2, 3, 4, 5)
self.domain.ID.return_value = 6
info = self.guest.get_info(self.host)
self.domain.info.assert_called_once_with()
self.assertEqual(1, info.state)
self.assertEqual(2, info.max_mem_kb)
self.assertEqual(3, info.mem_kb)
self.assertEqual(4, info.num_cpu)
self.assertEqual(5, info.cpu_time_ns)
self.assertEqual(6, info.id)
def test_get_power_state(self):
self.domain.info.return_value = (1, 2, 3, 4, 5)
power = self.guest.get_power_state(self.host)
self.assertEqual(1, power)
def test_is_active_when_domain_is_active(self):
with mock.patch.object(self.domain, "isActive", return_value=True):
self.assertTrue(self.guest.is_active())
def test_is_active_when_domain_not_active(self):
with mock.patch.object(self.domain, "isActive", return_value=False):
self.assertFalse(self.guest.is_active())
def test_freeze_filesystems(self):
self.guest.freeze_filesystems()
self.domain.fsFreeze.assert_called_once_with()
def test_thaw_filesystems(self):
self.guest.thaw_filesystems()
self.domain.fsThaw.assert_called_once_with()
def _conf_snapshot(self):
conf = mock.Mock(spec=vconfig.LibvirtConfigGuestSnapshotDisk)
conf.to_xml.return_value = '<disk/>'
return conf
def test_snapshot(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>', flags=0)
conf.to_xml.assert_called_once_with()
def test_snapshot_no_metadata(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, no_metadata=True)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>',
            flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)
conf.to_xml.assert_called_once_with()
def test_snapshot_disk_only(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, disk_only=True)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
conf.to_xml.assert_called_once_with()
def test_snapshot_reuse_ext(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, reuse_ext=True)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
conf.to_xml.assert_called_once_with()
def test_snapshot_quiesce(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, quiesce=True)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
conf.to_xml.assert_called_once_with()
def test_snapshot_all(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, no_metadata=True,
disk_only=True, reuse_ext=True,
quiesce=True)
        self.domain.snapshotCreateXML.assert_called_once_with(
            '<disk/>', flags=(
                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE))
conf.to_xml.assert_called_once_with()
def test_pause(self):
self.guest.pause()
self.domain.suspend.assert_called_once_with()
def test_migrate_v1(self):
self.guest.migrate('an-uri', flags=1, bandwidth=2)
self.domain.migrateToURI.assert_called_once_with(
'an-uri', flags=1, bandwidth=2)
def test_migrate_v2(self):
self.guest.migrate('an-uri', domain_xml='</xml>', flags=1, bandwidth=2)
self.domain.migrateToURI2.assert_called_once_with(
'an-uri', miguri=None, dxml='</xml>', flags=1, bandwidth=2)
def test_migrate_v3(self):
self.guest.migrate('an-uri', domain_xml='</xml>',
params={'p1': 'v1'}, flags=1, bandwidth=2)
self.domain.migrateToURI3.assert_called_once_with(
'an-uri', flags=1, params={'p1': 'v1'})
def test_abort_job(self):
self.guest.abort_job()
self.domain.abortJob.assert_called_once_with()
def test_migrate_configure_max_downtime(self):
self.guest.migrate_configure_max_downtime(1000)
self.domain.migrateSetMaxDowntime.assert_called_once_with(1000)
class GuestBlockTestCase(test.NoDBTestCase):
def setUp(self):
super(GuestBlockTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.host = host.Host("qemu:///system")
self.context = context.get_admin_context()
self.domain = mock.Mock(spec=fakelibvirt.virDomain)
self.guest = libvirt_guest.Guest(self.domain)
self.gblock = self.guest.get_block_device('vda')
def test_abort_job(self):
self.gblock.abort_job()
self.domain.blockJobAbort.assert_called_once_with('vda', flags=0)
    def test_abort_job_async(self):
        # 'async' became a reserved keyword in Python 3.7; the argument is
        # assumed to be named 'async_' as in later releases of this API
        self.gblock.abort_job(async_=True)
self.domain.blockJobAbort.assert_called_once_with(
'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)
def test_abort_job_pivot(self):
self.gblock.abort_job(pivot=True)
self.domain.blockJobAbort.assert_called_once_with(
'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
def test_get_job_info(self):
self.domain.blockJobInfo.return_value = {
"type": 1,
"bandwidth": 18,
"cur": 66,
"end": 100}
info = self.gblock.get_job_info()
self.assertEqual(1, info.job)
self.assertEqual(18, info.bandwidth)
self.assertEqual(66, info.cur)
self.assertEqual(100, info.end)
self.domain.blockJobInfo.assert_called_once_with('vda', flags=0)
def test_resize(self):
self.gblock.resize(10)
self.domain.blockResize.assert_called_once_with('vda', 10)
def test_rebase(self):
self.gblock.rebase("foo")
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0, flags=0)
def test_rebase_shallow(self):
self.gblock.rebase("foo", shallow=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
def test_rebase_reuse_ext(self):
self.gblock.rebase("foo", reuse_ext=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
def test_rebase_copy(self):
self.gblock.rebase("foo", copy=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)
def test_rebase_relative(self):
self.gblock.rebase("foo", relative=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
def test_commit(self):
self.gblock.commit("foo", "top")
self.domain.blockCommit.assert_called_once_with(
'vda', "foo", "top", 0, flags=0)
def test_commit_relative(self):
self.gblock.commit("foo", "top", relative=True)
self.domain.blockCommit.assert_called_once_with(
'vda', "foo", "top", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
def test_is_job_complete_cur_end_zeros(self):
self.domain.blockJobInfo.return_value = {
"type": 4,
"bandwidth": 18,
"cur": 0,
"end": 0}
is_complete = self.gblock.is_job_complete()
self.assertFalse(is_complete)
def test_is_job_complete_current_lower_than_end(self):
self.domain.blockJobInfo.return_value = {
"type": 4,
"bandwidth": 18,
"cur": 95,
"end": 100}
is_complete = self.gblock.is_job_complete()
self.assertFalse(is_complete)
def test_is_job_complete_finished(self):
self.domain.blockJobInfo.return_value = {
"type": 4,
"bandwidth": 18,
"cur": 100,
"end": 100}
is_complete = self.gblock.is_job_complete()
self.assertTrue(is_complete)
def test_is_job_complete_no_job(self):
self.domain.blockJobInfo.return_value = {}
is_complete = self.gblock.is_job_complete()
self.assertTrue(is_complete)
def test_is_job_complete_exception(self):
self.domain.blockJobInfo.side_effect = fakelibvirt.libvirtError('fake')
self.assertRaises(fakelibvirt.libvirtError,
self.gblock.is_job_complete)
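
# For reference, the four is_job_complete tests above jointly encode this
# completion predicate (a sketch of the expected behavior, not the actual
# nova implementation): an empty blockJobInfo() dict means no job is running,
# otherwise the job is complete only once cur == end with a non-zero end.
def _is_job_complete_sketch(status):
    if not status:
        return True
    return status.get('end', 0) != 0 and status.get('cur') == status.get('end')
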
class JobInfoTestCase(test.NoDBTestCase):
def setUp(self):
super(JobInfoTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.conn = fakelibvirt.openAuth("qemu:///system",
[[], lambda: True])
xml = ("<domain type='kvm'>"
" <name>instance-0000000a</name>"
"</domain>")
self.dom = self.conn.createXML(xml, 0)
self.guest = libvirt_guest.Guest(self.dom)
libvirt_guest.JobInfo._have_job_stats = True
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats(self, mock_stats, mock_info):
mock_stats.return_value = {
"type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
"memory_total": 75,
"memory_processed": 50,
"memory_remaining": 33,
"some_new_libvirt_stat_we_dont_know_about": 83
}
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(0, info.disk_total)
self.assertEqual(0, info.disk_processed)
self.assertEqual(0, info.disk_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_support(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_attr_error(self, mock_stats, mock_info):
mock_stats.side_effect = AttributeError("No such API")
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_operation_invalid(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Domain is not running",
fakelibvirt.VIR_ERR_OPERATION_INVALID)
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_operation_invalid(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Domain is not running",
fakelibvirt.VIR_ERR_OPERATION_INVALID)
info = self.guest.get_job_info()
self.assertIsInstance(info, libvirt_guest.JobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
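
# A sketch of the fallback chain the JobInfo tests above exercise (shape
# inferred from the mocked calls, not from the nova source): prefer
# jobStats(); on VIR_ERR_NO_SUPPORT or AttributeError fall back to the older
# jobInfo(); and report a synthetic VIR_DOMAIN_JOB_COMPLETED result when
# libvirt raises VIR_ERR_NO_DOMAIN or VIR_ERR_OPERATION_INVALID.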
|
|
"""tests basic polymorphic mapper loading/saving, minimal relationships"""
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.test import Column, testing
from sqlalchemy.util import function_named
from test.orm import _fixtures, _base
class Person(_fixtures.Base):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Company(_fixtures.Base):
pass
class PolymorphTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
global companies, people, engineers, managers, boss
companies = Table('companies', metadata,
Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(50)))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('company_id', Integer, ForeignKey('companies.company_id')),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
Column('status', String(30)),
Column('engineer_name', String(50)),
Column('primary_language', String(50)),
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50))
)
boss = Table('boss', metadata,
Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True),
Column('golf_swing', String(30)),
)
metadata.create_all()
class InsertOrderTest(PolymorphTest):
def test_insert_order(self):
"""test that classes of multiple types mix up mapper inserts
so that insert order of individual tables is maintained"""
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
'person':people.select(people.c.type=='person'),
}, None, 'pjoin')
person_mapper = mapper(Person, people, with_polymorphic=('*', person_join), polymorphic_on=person_join.c.type, polymorphic_identity='person')
mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer')
mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager')
mapper(Company, companies, properties={
'employees': relationship(Person,
backref='company',
order_by=person_join.c.person_id)
})
session = create_session()
c = Company(name='company1')
c.employees.append(Manager(status='AAB', manager_name='manager1', name='pointy haired boss'))
c.employees.append(Engineer(status='BBA', engineer_name='engineer1', primary_language='java', name='dilbert'))
c.employees.append(Person(status='HHH', name='joesmith'))
c.employees.append(Engineer(status='CGG', engineer_name='engineer2', primary_language='python', name='wally'))
c.employees.append(Manager(status='ABA', manager_name='manager2', name='jsmith'))
session.add(c)
session.flush()
session.expunge_all()
eq_(session.query(Company).get(c.company_id), c)
class RelationshipToSubclassTest(PolymorphTest):
def test_basic(self):
"""test a relationship to an inheriting mapper where the relationship is to a subclass
but the join condition is expressed by the parent table.
also test that backrefs work in this case.
this test touches upon a lot of the join/foreign key determination code in properties.py
and creates the need for properties.py to search for conditions individually within
the mapper's local table as well as the mapper's 'mapped' table, so that relationships
requiring lots of specificity (like self-referential joins) as well as relationships requiring
more generalization (like the example here) both come up with proper results."""
mapper(Person, people)
mapper(Engineer, engineers, inherits=Person)
mapper(Manager, managers, inherits=Person)
mapper(Company, companies, properties={
'managers': relationship(Manager, backref="company")
})
sess = create_session()
c = Company(name='company1')
c.managers.append(Manager(status='AAB', manager_name='manager1', name='pointy haired boss'))
sess.add(c)
sess.flush()
sess.expunge_all()
eq_(sess.query(Company).filter_by(company_id=c.company_id).one(), c)
assert c.managers[0].company is c
class RoundTripTest(PolymorphTest):
pass
def _generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic):
"""generates a round trip test.
include_base - whether or not to include the base 'person' type in the union.
lazy_relationship - whether or not the Company relationship to People is lazy or eager.
    redefine_colprop - whether we redefine the 'name' column to be 'people_name' on the base Person class.
    with_polymorphic - the with_polymorphic strategy to use: 'unions', 'joins', 'auto', or 'none'.
"""
def test_roundtrip(self):
if with_polymorphic == 'unions':
if include_base:
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
'person':people.select(people.c.type=='person'),
}, None, 'pjoin')
else:
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, None, 'pjoin')
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ['*', person_join]
manager_with_polymorphic = ['*', manager_join]
elif with_polymorphic == 'joins':
person_join = people.outerjoin(engineers).outerjoin(managers).outerjoin(boss)
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ['*', person_join]
manager_with_polymorphic = ['*', manager_join]
elif with_polymorphic == 'auto':
person_with_polymorphic = '*'
manager_with_polymorphic = '*'
else:
person_with_polymorphic = None
manager_with_polymorphic = None
if redefine_colprop:
person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person', properties= {'person_name':people.c.name})
else:
person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person')
mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer')
mapper(Manager, managers, inherits=person_mapper, with_polymorphic=manager_with_polymorphic, polymorphic_identity='manager')
mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss')
mapper(Company, companies, properties={
'employees': relationship(Person, lazy=lazy_relationship,
cascade="all, delete-orphan",
backref="company", order_by=people.c.person_id
)
})
if redefine_colprop:
person_attribute_name = 'person_name'
else:
person_attribute_name = 'name'
employees = [
Manager(status='AAB', manager_name='manager1', **{person_attribute_name:'pointy haired boss'}),
Engineer(status='BBA', engineer_name='engineer1', primary_language='java', **{person_attribute_name:'dilbert'}),
]
if include_base:
employees.append(Person(**{person_attribute_name:'joesmith'}))
employees += [
Engineer(status='CGG', engineer_name='engineer2', primary_language='python', **{person_attribute_name:'wally'}),
Manager(status='ABA', manager_name='manager2', **{person_attribute_name:'jsmith'})
]
pointy = employees[0]
jsmith = employees[-1]
dilbert = employees[1]
session = create_session()
c = Company(name='company1')
c.employees = employees
session.add(c)
session.flush()
session.expunge_all()
eq_(session.query(Person).get(dilbert.person_id), dilbert)
session.expunge_all()
eq_(session.query(Person).filter(Person.person_id==dilbert.person_id).one(), dilbert)
session.expunge_all()
def go():
cc = session.query(Company).get(c.company_id)
eq_(cc.employees, employees)
if not lazy_relationship:
if with_polymorphic != 'none':
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 5)
else:
if with_polymorphic != 'none':
self.assert_sql_count(testing.db, go, 2)
else:
self.assert_sql_count(testing.db, go, 6)
# test selecting from the query, using the base mapped table (people) as the selection criterion.
# in the case of the polymorphic Person query, the "people" selectable should be adapted to be "person_join"
eq_(
session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first(),
dilbert
)
assert session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first().person_id
eq_(
session.query(Engineer).filter(getattr(Person, person_attribute_name)=='dilbert').first(),
dilbert
)
# test selecting from the query, joining against an alias of the base "people" table. test that
# the "palias" alias does *not* get sucked up into the "person_join" conversion.
palias = people.alias("palias")
dilbert = session.query(Person).get(dilbert.person_id)
assert dilbert is session.query(Person).filter((palias.c.name=='dilbert') & (palias.c.person_id==Person.person_id)).first()
assert dilbert is session.query(Engineer).filter((palias.c.name=='dilbert') & (palias.c.person_id==Person.person_id)).first()
assert dilbert is session.query(Person).filter((Engineer.engineer_name=="engineer1") & (engineers.c.person_id==people.c.person_id)).first()
assert dilbert is session.query(Engineer).filter(Engineer.engineer_name=="engineer1")[0]
dilbert.engineer_name = 'hes dibert!'
session.flush()
session.expunge_all()
def go():
session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
self.assert_sql_count(testing.db, go, 1)
session.expunge_all()
dilbert = session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
        def go():
            # assert that only the primary table is queried for an
            # already-present-in-session object
            session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
self.assert_sql_count(testing.db, go, 1)
# test standalone orphans
daboss = Boss(status='BBB', manager_name='boss', golf_swing='fore', **{person_attribute_name:'daboss'})
session.add(daboss)
assert_raises(orm_exc.FlushError, session.flush)
c = session.query(Company).first()
daboss.company = c
manager_list = [e for e in c.employees if isinstance(e, Manager)]
session.flush()
session.expunge_all()
eq_(session.query(Manager).order_by(Manager.person_id).all(), manager_list)
c = session.query(Company).first()
session.delete(c)
session.flush()
eq_(people.count().scalar(), 0)
test_roundtrip = function_named(
test_roundtrip, "test_%s%s%s_%s" % (
(lazy_relationship and "lazy" or "eager"),
(include_base and "_inclbase" or ""),
(redefine_colprop and "_redefcol" or ""),
with_polymorphic))
setattr(RoundTripTest, test_roundtrip.__name__, test_roundtrip)
for lazy_relationship in [True, False]:
for redefine_colprop in [True, False]:
for with_polymorphic in ['unions', 'joins', 'auto', 'none']:
if with_polymorphic == 'unions':
for include_base in [True, False]:
_generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic)
else:
_generate_round_trip_test(False, lazy_relationship, redefine_colprop, with_polymorphic)
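
# The loops above stamp out one test per parameter combination; for example
# the (lazy_relationship=True, include_base=True, redefine_colprop=True,
# with_polymorphic='unions') case is attached to RoundTripTest as
# test_lazy_inclbase_redefcol_unions.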
|
|
from __future__ import unicode_literals
from threading import Timer
from datetime import datetime, timedelta
from rtmbot.core import Plugin
from utils import word_checking as wc_utils
from utils.db import update_user_counts, get_user_counts
from utils.utils import load_json, write_json, add_plurals
import threading
OPT_IN_FILE = "data_files/opted_in.json"
WORDS_FILE = "data_files/words.json"
class PluginWordRespond(Plugin):
def __init__(self, **kwargs):
# because of the way plugins are called we must explicitly pass the
# arguments to the super
super(PluginWordRespond, self).__init__(**kwargs)
self.master_words = load_json(WORDS_FILE) #contains alternates
self.words = self._make_words_list(self.master_words)
self.opted_in = set(self._load_opted_in(OPT_IN_FILE))
        # timer to send a private message each day at 5pm to opted-in users;
        # use timedelta rather than day=x.day+1, which raises ValueError on
        # the last day of a month
        now = datetime.today()
        next_run = (now + timedelta(days=1)).replace(hour=17, minute=0,
                                                     second=0, microsecond=0)
        secs = int((next_run - now).total_seconds()) + 1
        t = Timer(secs, self.job)
        t.start()
def job(self):
total_counts = get_user_counts()
for user in total_counts:
the_user = {}
the_user["user"] = user
user_count = total_counts.get(user)
            if not user_count:
                continue
            self._send_count_message(the_user, user_count)
            # clear the count for the user after sending the end-of-day message
            update_user_counts(user, dict())
        # reschedule the timer to fire again at 5pm the next day
        now = datetime.today()
        next_run = (now + timedelta(days=1)).replace(hour=17, minute=0,
                                                     second=0, microsecond=0)
        secs = int((next_run - now).total_seconds()) + 1
        threading.Timer(secs, self.job).start()
def process_message(self, data):
# TODO: for debugging only, remove for prod
print(data)
# trigger opt-in flow if talking directly to bot
if data['text'].startswith("@cheeseburger_backpack"):
self._commands(data)
return
# only opted-in users should experience this workflow
if data['user'] in self.opted_in:
word_counter = wc_utils.check_for_flag_words(
data['text'], self.words
)
if word_counter:
total_counts = update_user_counts(data["user"], word_counter)
print('total counts: {}'.format(total_counts))
def _make_words_list(self, master_list):
"""
receive:
{
"fruit": {
"apple": {
"alternatives":["asparagus"],
"variations": ["applez"]
},
"banana": {
"alternatives":["broccoli"],
"variations": []
},
"pineapple": {
"alternatives":["pea"],
"variations": []
}
}
}
return:
{
"apple": ["apples", "applez"],
"banana": ["bananas"],
"pineapple": ["pineapples"]
}
"""
result = {}
for category in master_list.values():
for word, extras in category.iteritems():
result[word] = extras["variations"]
return add_plurals(result)
def _optin_flow(self, data):
# a bit hacky but why not reuse
status = wc_utils.check_for_flag_words(
data["text"],
{"optin":["opt-in"], "optout":["opt-out"]}
)
optin = status.get("optin")
optout = status.get("optout")
message = {
"channel": self._get_user_dm_channel(data),
"as_user": "true",
"text": "Optin Status Updated!"
}
if optin:
self.opted_in.add(data["user"])
write_json({"opted_in": list(self.opted_in)}, OPT_IN_FILE)
self.slack_client.api_call("chat.postMessage", **message)
elif optout:
self.opted_in.discard(data["user"])
write_json({"opted_in": list(self.opted_in)}, OPT_IN_FILE)
self.slack_client.api_call("chat.postMessage", **message)
def _build_slack_count_message(self, channel_id, count_dict):
result = {
"channel": channel_id,
"as_user": "true",
"attachments": []
}
result["attachments"].append(self._build_slack_count_attachment(
count_dict
))
return result
def _build_slack_count_attachment(self, count_dict):
attachment_template = {
"fallback": "Breakdown of words used, and possible "
"alternatives", # fallback text
"pretext": "Unpacking the Words You've Used:",
"color": "#439FE0",
"fields": [],
"footer": "Cheeseburger Backpack",
"mrkdwn_in": ["fields"]
}
for word, count in count_dict.iteritems():
alt = "No alternatives."
for category, base_word_dict in self.master_words.iteritems():
for base_word, extra_words in base_word_dict.iteritems():
if word == base_word:
if extra_words['alternatives']:
alt = ', '.join(extra_words['alternatives'])
attachment_template["fields"].append({
"title": word.capitalize(),
"value": "Count: {count}\nAlternative(s): {alt}".format(
count=count,
alt=alt
),
})
return attachment_template
def _load_opted_in(self, filepath):
# TODO this will become more complicated when we use a db
users = load_json(filepath)
if users:
return users["opted_in"]
else:
return []
def _get_user_dm_channel(self, data):
resp = self.slack_client.api_call('im.open', user=data['user'])
if resp['ok']:
return resp['channel']['id']
def _send_count_message(self, data, word_counter):
user_channel = self._get_user_dm_channel(data)
mssg_kwargs = self._build_slack_count_message(
user_channel,
word_counter
)
self.slack_client.api_call(
"chat.postMessage", **mssg_kwargs
)
def _commands(self, data):
user_channel = self._get_user_dm_channel(data)
opt_in_opts = ["optin", "optout"]
user_command = data["text"]
user_command.replace('@cheeseburger_backpack', '')
user_command.split()
if 'help' in user_command:
message = {
"channel": user_channel,
"as_user": "true",
"mrkdwn": "true",
"text": "Available Commands:\n"
"*@cheeseburger_backpack optin* - opt yourself INTO "
"updates on your words usage and suggestions for "
"changes.\n\n"
"*@cheeseburger_backpack optout* - opt yourself OUT OF "
"updates on your words usage and suggestions for "
"changes.\n\n"
"*@cheeseburger_backpack status* - display your "
"optin status.\n\n"
"*@cheeseburger_backpack help* - display this help "
"message again."
}
self.slack_client.api_call("chat.postMessage", **message)
elif 'status' in user_command:
user_status = data["user"] in self.opted_in
message = {
"channel": user_channel,
"as_user": "true",
"mrkdwn": "true",
"text": "*Optin Status:* {}".format(user_status)
}
self.slack_client.api_call("chat.postMessage", **message)
# check if optin or optout is in command, but not both,
# by comparing the two lists and returning shared values. There
# should only be 1 value.
elif len([i for i in opt_in_opts if i in user_command])==1:
self._optin_flow(data)
else:
message = {
"channel": user_channel,
"as_user": "true",
"mrkdwn": "true",
"text": "I could not understand your request. Type:\n\n"
"*@cheeseburger_backpack help*\n\nto see my command "
"options. Note: I can only understand one command at a "
"time."
}
self.slack_client.api_call("chat.postMessage", **message)
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks third-party licenses for the purposes of the Android WebView build.
The Android tree includes a snapshot of Chromium in order to power the system
WebView. This tool checks that all code uses open-source licenses compatible
with Android, and that we meet the requirements of those licenses. It can also
be used to generate an Android NOTICE file for the third-party code.
It makes use of src/tools/licenses.py and the README.chromium files on which
it depends. It also makes use of a data file, third_party_files_whitelist.txt,
which whitelists individual files which contain third-party code but which
aren't in a third-party directory with a README.chromium file.
"""
import glob
import imp
import optparse
import os
import re
import subprocess
import sys
import textwrap
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..'))
# Import third_party/PRESUBMIT.py via imp to avoid importing a random
# PRESUBMIT.py from $PATH, also make sure we don't generate a .pyc file.
sys.dont_write_bytecode = True
third_party = \
imp.load_source('PRESUBMIT', \
os.path.join(REPOSITORY_ROOT, 'third_party', 'PRESUBMIT.py'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools'))
import licenses
import known_issues
class InputApi(object):
def __init__(self):
self.re = re
def GetIncompatibleDirectories():
"""Gets a list of third-party directories which use licenses incompatible
with Android. This is used by the snapshot tool.
Returns:
A list of directories.
"""
result = []
for directory in _FindThirdPartyDirs():
if directory in known_issues.KNOWN_ISSUES:
result.append(directory)
continue
try:
metadata = licenses.ParseDir(directory, REPOSITORY_ROOT,
require_license_file=False)
except licenses.LicenseError as e:
print 'Got LicenseError while scanning ' + directory
raise
if metadata.get('License Android Compatible', 'no').upper() == 'YES':
continue
license = re.split(' [Ll]icenses?$', metadata['License'])[0]
if not third_party.LicenseIsCompatibleWithAndroid(InputApi(), license):
result.append(directory)
return result
def GetUnknownIncompatibleDirectories():
"""Gets a list of third-party directories which use licenses incompatible
with Android which are not present in the known_issues.py file.
This is used by the AOSP bot.
Returns:
A list of directories.
"""
incompatible_directories = frozenset(GetIncompatibleDirectories())
known_incompatible = []
for path, exclude_list in known_issues.KNOWN_INCOMPATIBLE.iteritems():
for exclude in exclude_list:
if glob.has_magic(exclude):
exclude_dirname = os.path.dirname(exclude)
if glob.has_magic(exclude_dirname):
print ('Exclude path %s contains an unexpected glob expression,' \
' skipping.' % exclude)
exclude = exclude_dirname
known_incompatible.append(os.path.normpath(os.path.join(path, exclude)))
known_incompatible = frozenset(known_incompatible)
return incompatible_directories.difference(known_incompatible)
class ScanResult(object):
Ok, Warnings, Errors = range(3)
def _CheckLicenseHeaders(excluded_dirs_list, whitelisted_files):
"""Checks that all files which are not in a listed third-party directory,
and which do not use the standard Chromium license, are whitelisted.
Args:
excluded_dirs_list: The list of directories to exclude from scanning.
whitelisted_files: The whitelist of files.
Returns:
ScanResult.Ok if all files with non-standard license headers are whitelisted
and the whitelist contains no stale entries;
ScanResult.Warnings if there are stale entries;
ScanResult.Errors if new non-whitelisted entries found.
"""
  excluded_dirs_list = [d for d in excluded_dirs_list if 'third_party' not in d]
  # Using a common pattern for third-party directories keeps the ignore regexp shorter.
excluded_dirs_list.append('third_party')
# VCS dirs
excluded_dirs_list.append('.git')
excluded_dirs_list.append('.svn')
# Build output
excluded_dirs_list.append('out/Debug')
excluded_dirs_list.append('out/Release')
# 'Copyright' appears in license agreements
excluded_dirs_list.append('chrome/app/resources')
# This is a test output directory
excluded_dirs_list.append('chrome/tools/test/reference_build')
  # Blink-style copyright headers.
  excluded_dirs_list.append('content/shell/renderer/test_runner')
  # Blink-style copyright headers.
  excluded_dirs_list.append('content/shell/tools/plugin')
  # This is a tests directory that doesn't exist in the snapshot.
  excluded_dirs_list.append('content/test/data')
# This is a tests directory that doesn't exist in the shipped product.
excluded_dirs_list.append('gin/test')
# This is a test output directory
excluded_dirs_list.append('data/dom_perf')
# This is a tests directory that doesn't exist in the shipped product.
excluded_dirs_list.append('tools/perf/page_sets')
excluded_dirs_list.append('tools/perf/page_sets/tough_animation_cases')
# Histogram tools, doesn't exist in the snapshot
excluded_dirs_list.append('tools/histograms')
# Swarming tools, doesn't exist in the snapshot
excluded_dirs_list.append('tools/swarming_client')
# Arm sysroot tools, doesn't exist in the snapshot
excluded_dirs_list.append('arm-sysroot')
  # This data is not part of open-source Chromium, but is included on some bots.
  excluded_dirs_list.append('data')
  # This is not part of open-source Chromium, but is included on some bots.
  excluded_dirs_list.append('skia/tools/clusterfuzz-data')
args = ['android_webview/tools/find_copyrights.pl',
'.'
] + excluded_dirs_list
p = subprocess.Popen(args=args, cwd=REPOSITORY_ROOT, stdout=subprocess.PIPE)
lines = p.communicate()[0].splitlines()
offending_files = []
allowed_copyrights = '^(?:\*No copyright\*' \
'|20[0-9][0-9](?:-20[0-9][0-9])? The Chromium Authors\. ' \
'All rights reserved.*)$'
allowed_copyrights_re = re.compile(allowed_copyrights)
for l in lines:
entries = l.split('\t')
if entries[1] == "GENERATED FILE":
continue
copyrights = entries[1].split(' / ')
for c in copyrights:
if c and not allowed_copyrights_re.match(c):
offending_files.append(os.path.normpath(entries[0]))
break
unknown = set(offending_files) - set(whitelisted_files)
if unknown:
print 'The following files contain a third-party license but are not in ' \
'a listed third-party directory and are not whitelisted. You must ' \
'add the following files to the whitelist.\n%s' % \
'\n'.join(sorted(unknown))
stale = set(whitelisted_files) - set(offending_files)
if stale:
    print 'The following files are whitelisted unnecessarily. You must ' \
          'remove the following files from the whitelist.\n%s' % \
          '\n'.join(sorted(stale))
if unknown:
return ScanResult.Errors
elif stale:
return ScanResult.Warnings
else:
return ScanResult.Ok
def _ReadFile(path):
"""Reads a file from disk.
Args:
path: The path of the file to read, relative to the root of the repository.
Returns:
The contents of the file as a string.
"""
return open(os.path.join(REPOSITORY_ROOT, path), 'rb').read()
def _FindThirdPartyDirs():
"""Gets the list of third-party directories.
Returns:
The list of third-party directories.
"""
# Please don't add here paths that have problems with license files,
# as they will end up included in Android WebView snapshot.
# Instead, add them into known_issues.py.
prune_paths = [
# Placeholder directory, no third-party code.
os.path.join('third_party', 'adobe'),
# Apache 2.0 license. See
# https://code.google.com/p/chromium/issues/detail?id=140478.
os.path.join('third_party', 'bidichecker'),
# Isn't checked out on clients
os.path.join('third_party', 'gles2_conform'),
# The llvm-build doesn't exist for non-clang builder
os.path.join('third_party', 'llvm-build'),
# Binaries doesn't apply to android
os.path.join('third_party', 'widevine'),
# third_party directories in this tree aren't actually third party, but
# provide a way to shadow experimental buildfiles into those directories.
os.path.join('tools', 'gn', 'secondary'),
# Not shipped, Chromium code
os.path.join('tools', 'swarming_client'),
]
third_party_dirs = licenses.FindThirdPartyDirs(prune_paths, REPOSITORY_ROOT)
return licenses.FilterDirsWithFiles(third_party_dirs, REPOSITORY_ROOT)
def _Scan():
"""Checks that license meta-data is present for all third-party code and
that all non third-party code doesn't contain external copyrighted code.
Returns:
ScanResult.Ok if everything is in order;
ScanResult.Warnings if there are non-fatal problems (e.g. stale whitelist
entries)
ScanResult.Errors otherwise.
"""
third_party_dirs = _FindThirdPartyDirs()
# First, check designated third-party directories using src/tools/licenses.py.
all_licenses_valid = True
for path in sorted(third_party_dirs):
try:
licenses.ParseDir(path, REPOSITORY_ROOT)
except licenses.LicenseError, e:
if not (path in known_issues.KNOWN_ISSUES):
print 'Got LicenseError "%s" while scanning %s' % (e, path)
all_licenses_valid = False
# Second, check for non-standard license text.
files_data = _ReadFile(os.path.join('android_webview', 'tools',
'third_party_files_whitelist.txt'))
whitelisted_files = []
for line in files_data.splitlines():
match = re.match(r'([^#\s]+)', line)
if match:
whitelisted_files.append(match.group(1))
licenses_check = _CheckLicenseHeaders(third_party_dirs, whitelisted_files)
return licenses_check if all_licenses_valid else ScanResult.Errors
def GenerateNoticeFile():
"""Generates the contents of an Android NOTICE file for the third-party code.
This is used by the snapshot tool.
Returns:
The contents of the NOTICE file.
"""
third_party_dirs = _FindThirdPartyDirs()
# Don't forget Chromium's LICENSE file
content = [_ReadFile('LICENSE')]
# We provide attribution for all third-party directories.
# TODO(steveblock): Limit this to only code used by the WebView binary.
for directory in sorted(third_party_dirs):
metadata = licenses.ParseDir(directory, REPOSITORY_ROOT,
require_license_file=False)
license_file = metadata['License File']
if license_file and license_file != licenses.NOT_SHIPPED:
content.append(_ReadFile(license_file))
return '\n'.join(content)
def main():
class FormatterWithNewLines(optparse.IndentedHelpFormatter):
def format_description(self, description):
paras = description.split('\n')
formatted_paras = [textwrap.fill(para, self.width) for para in paras]
return '\n'.join(formatted_paras) + '\n'
parser = optparse.OptionParser(formatter=FormatterWithNewLines(),
usage='%prog [options]')
parser.description = (__doc__ +
'\nCommands:\n' \
' scan Check licenses.\n' \
' notice Generate Android NOTICE file on stdout. \n' \
' incompatible_directories Scan for incompatibly'
' licensed directories.')
(_, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
return ScanResult.Errors
if args[0] == 'scan':
scan_result = _Scan()
if scan_result == ScanResult.Ok:
print 'OK!'
return scan_result
elif args[0] == 'notice':
print GenerateNoticeFile()
return ScanResult.Ok
elif args[0] == 'incompatible_directories':
incompatible_directories = GetUnknownIncompatibleDirectories()
if incompatible_directories:
print ("Incompatibly licensed directories found:\n" +
"\n".join(sorted(incompatible_directories)))
return ScanResult.Errors
return ScanResult.Ok
parser.print_help()
return ScanResult.Errors
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
"""
sphinxjp.themes.revealjs.directives
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: tell-k <ffk2005@gmail.com>
:copyright: tell-k. All Rights Reserved.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
from docutils.parsers.rst import Directive
from . import compat
__docformat__ = 'reStructuredText'
class revealjs(nodes.General, nodes.Element):
""" node for revealjs """
class rv_code(nodes.General, nodes.Element):
""" node for revealjs code section """
class rv_small(nodes.General, nodes.Element):
""" node for revealjs small text section """
class rv_note(nodes.General, nodes.Element):
""" node for revealjs presentation note """
def heading(argument):
""" directives choices for heading tag """
return directives.choice(argument, ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'))
class RevealjsDirective(Directive):
""" Reveal.JS slide entry """
has_content = True
required_arguments = 0
optional_arguments = 100
final_argument_whitespace = False
option_spec = {
'id': directives.unchanged,
'class': directives.class_option,
'noheading': directives.flag,
'title-heading': heading,
'subtitle': directives.unchanged,
'subtitle-heading': directives.unchanged,
'data-autoslide': directives.unchanged,
'data-markdown': directives.unchanged,
'data-transition': directives.unchanged,
'data-transition-speed': directives.unchanged,
'data-background': directives.unchanged,
'data-background-repeat': directives.unchanged,
'data-background-size': directives.unchanged,
'data-background-transition': directives.unchanged,
'data-state': directives.unchanged,
'data-separator': directives.unchanged,
'data-separator-vertical': directives.unchanged,
'data-separator-notes': directives.unchanged,
'data-charset': directives.unchanged,
}
node_class = revealjs
def run(self):
""" build revealjs node """
set_classes(self.options)
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
if "data-markdown" not in self.options:
self.state.nested_parse(self.content, self.content_offset, node)
if self.arguments:
node['title'] = " ".join(self.arguments)
node['noheading'] = ('noheading' in self.options)
options_list = (
'id',
'title-heading',
'subtitle-heading',
'data-autoslide',
'data-transition',
'data-transition-speed',
'data-background',
'data-background-repeat',
'data-background-size',
'data-background-transition',
'data-state',
'data-markdown',
'data-separator',
'data-separator-vertical',
'data-separator-notes',
'data-charset',
)
for option in options_list:
if option in self.options:
node[option] = self.options.get(option)
return [node]
class RvSmallDirective(Directive):
"""
Create small text tag.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
node_class = rv_small
def run(self):
""" build rv_small node """
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class RvNoteDirective(Directive):
"""
Directive for a notes tag.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
node_class = rv_note
def run(self):
""" build rv_note node """
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class RvCodeDirective(Directive):
"""
Directive for a code block with highlight.js
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'id': directives.unchanged,
'class': directives.class_option,
}
node_class = rv_code
def run(self):
""" build rv_code node """
set_classes(self.options)
self.assert_has_content()
node = self.node_class('\n'.join(self.content), **self.options)
return [node]
def visit_revealjs(self, node):
""" build start tag for revealjs """
section_attr = {}
markdown_headings = {"h1": "#", "h2": "##", "h3": "###",
"h4": "####", "h5": "#####", "h6": "######"}
if node.get("id"):
section_attr.update({"ids": [node.get("id")]})
attr_list = (
'data-autoslide',
'data-transition',
'data-transition-speed',
'data-background',
'data-background-repeat',
'data-background-size',
'data-background-transition',
'data-state',
'data-markdown',
'data-separator',
'data-separator-vertical',
'data-separator-notes',
'data-charset',
)
for attr in attr_list:
if node.get(attr) is not None:
section_attr.update({attr: node.get(attr)})
title = None
if node.get("title") and not node.get('noheading'):
title = node.get("title")
title_heading = node.get('title-heading', 'h2')
subtitle = node.get("subtitle")
subtitle_heading = node.get('subtitle-heading', 'h3')
if node.get("data-markdown") is not None:
title_base = compat.text("%(heading)s %(title)s \n")
title_text = None
if title:
title_text = title_base % dict(
heading=markdown_headings.get(title_heading),
title=title
)
subtitle_text = None
if subtitle:
subtitle_text = title_base % dict(
heading=markdown_headings.get(subtitle_heading),
title=subtitle
)
else:
title_base = compat.text("<%(heading)s>%(title)s</%(heading)s>\n")
title_text = None
if title:
title_text = title_base % dict(
title=title,
heading=title_heading)
subtitle_text = None
if subtitle:
subtitle_text = title_base % dict(
title=subtitle,
heading=subtitle_heading)
if node.get("data-markdown") is not None:
self.body.append(self.starttag(node, 'section', **section_attr))
if node.get("data-markdown") == compat.text(""):
self.body.append("<script type='text/template'>\n")
if title_text:
self.body.append(title_text)
if subtitle_text:
self.body.append(subtitle_text)
self.body.append(node.rawsource)
self.body.append("</script>\n")
else:
self.body.append(self.starttag(node, 'section', **section_attr))
if title_text:
self.body.append(title_text)
if subtitle_text:
self.body.append(subtitle_text)
self.set_first_last(node)
def depart_revealjs(self, node=None):
""" build end tag for revealjs """
self.body.append('</section>\n')
def visit_rv_code(self, node):
""" build start tag for rv_code """
self.body.append(self.starttag(node, 'pre'))
self.body.append("<code data-trim contenteditable>")
self.body.append(compat.escape_html(node.rawsource))
def depart_rv_code(self, node=None):
""" build end tag for rv_code """
self.body.append("</code>")
self.body.append("</pre>\n")
def visit_rv_small(self, node):
""" build start tag for rv_small """
self.body.append(self.starttag(node, 'small'))
self.set_first_last(node)
def depart_rv_small(self, node=None):
""" build end tag for rv_small"""
self.body.append("</small>\n")
def visit_rv_note(self, node):
""" build start tag for rv_note """
self.body.append(self.starttag(node, 'aside', **{'class': 'notes'}))
self.set_first_last(node)
def depart_rv_note(self, node=None):
""" build end tag for rv_note """
self.body.append("</aside>\n")
def setup(app):
"""Initialize """
app.add_node(revealjs, html=(visit_revealjs, depart_revealjs))
app.add_node(rv_code, html=(visit_rv_code, depart_rv_code))
app.add_node(rv_note, html=(visit_rv_note, depart_rv_note))
app.add_node(rv_small, html=(visit_rv_small, depart_rv_small))
app.add_directive('revealjs', RevealjsDirective)
app.add_directive('rv_code', RvCodeDirective)
app.add_directive('rv_note', RvNoteDirective)
app.add_directive('rv_small', RvSmallDirective)
return app
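
# Example reST usage (a sketch based on the options the directives above
# define):
#
#   .. revealjs:: Slide Title
#      :subtitle: a smaller heading under the title
#      :data-transition: zoom
#
#      Slide body text.
#
#   .. rv_code::
#
#      print("rendered in a <pre><code> block for highlight.js")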
|
|
"""
The ``LineEdit`` and ``MultiLineEdit`` widgets provide a way for the user
to input text.
.. UIExample:: 100
from flexx import app, event, ui
class Example(ui.Widget):
def init(self):
with ui.VBox():
self.line = ui.LineEdit(placeholder_text='type here')
self.l1 = ui.Label(html='<i>when user changes text</i>')
self.l2 = ui.Label(html='<i>when unfocusing or hitting enter </i>')
self.l3 = ui.Label(html='<i>when submitting (hitting enter)</i>')
ui.Widget(flex=1)
@event.reaction('line.user_text')
def when_user_changes_text(self, *events):
self.l1.set_text('user_text: ' + self.line.text)
@event.reaction('line.user_done')
def when_user_is_done_changing_text(self, *events):
self.l2.set_text('user_done: ' + self.line.text)
@event.reaction('line.submit')
def when_user_submits_text(self, *events):
self.l3.set_text('submit: ' + self.line.text)
"""
from ... import event
from . import Widget
class LineEdit(Widget):
""" An input widget to edit a line of text.
The ``node`` of this widget is a text
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_.
"""
DEFAULT_MIN_SIZE = 100, 28
CSS = """
.flx-LineEdit {
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-LineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
## Properties
text = event.StringProp(settable=True, doc="""
The current text of the line edit. Settable. If this is an empty
string, the placeholder_text is displayed instead.
""")
password_mode = event.BoolProp(False, settable=True, doc="""
        Whether the entered text should be hidden.
""")
placeholder_text = event.StringProp(settable=True, doc="""
The placeholder text (shown when the text is an empty string).
""")
autocomp = event.TupleProp(settable=True, doc="""
A tuple/list of strings for autocompletion. Might not work in all browsers.
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the line edit is disabled.
""")
## Methods, actions, emitters
def _create_dom(self):
global window
# Create node element
node = window.document.createElement('input')
node.setAttribute('type', 'input')
node.setAttribute('list', self.id)
self._autocomp = window.document.createElement('datalist')
self._autocomp.id = self.id
node.appendChild(self._autocomp)
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
#if IE10:
# self._addEventListener(self.node, 'change', f1, False)
return node
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text, either by
moving the focus elsewhere, or by hitting enter.
Has ``old_value`` and ``new_value`` attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def submit(self):
""" Event emitted when the user strikes the enter or return key
(but not when losing focus). Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
self.user_done()
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def key_down(self, e):
        # Prevent propagating the key event
ev = super().key_down(e)
pkeys = 'Escape', # keys to propagate
if (ev.modifiers and ev.modifiers != ('Shift', )) or ev.key in pkeys:
pass
else:
e.stopPropagation()
if ev.key in ('Enter', 'Return'):
self.submit()
# Nice to blur on mobile, since it hides keyboard, but less nice on desktop
# self.node.blur()
elif ev.key == 'Escape':
self.node.blur()
return ev
## Reactions
@event.reaction
def __text_changed(self):
self.node.value = self.text
@event.reaction
def __password_mode_changed(self):
self.node.type = ['text', 'password'][int(bool(self.password_mode))]
@event.reaction
def __placeholder_text_changed(self):
self.node.placeholder = self.placeholder_text
# note: this works in the browser but not in e.g. firefox-app
@event.reaction
def __autocomp_changed(self):
global window
autocomp = self.autocomp
# Clear
for op in self._autocomp:
self._autocomp.removeChild(op)
# Add new options
for option in autocomp:
op = window.document.createElement('option')
op.value = option
self._autocomp.appendChild(op)
@event.reaction
def __disabled_changed(self):
if self.disabled:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
class MultiLineEdit(Widget):
""" An input widget to edit multiple lines of text.
The ``node`` of this widget is a
`<textarea> <https://developer.mozilla.org/docs/Web/HTML/Element/textarea>`_.
"""
DEFAULT_MIN_SIZE = 100, 50
CSS = """
.flx-MultiLineEdit {
resize: none;
overflow-y: scroll;
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-MultiLineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
text = event.StringProp(settable=True, doc="""
The current text of the multi-line edit. Settable. If this is an empty
string, the placeholder_text is displayed instead.
""")
    def _create_dom(self):
        global window  # as in LineEdit._create_dom, needed to reach the DOM
        node = window.document.createElement('textarea')
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
return node
@event.reaction
def __text_changed(self):
self.node.value = self.text
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text by
moving the focus elsewhere. Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SourceControlConfigurationsOperations:
"""SourceControlConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.kubernetesconfiguration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs
) -> "_models.SourceControlConfiguration":
"""Gets details of the Source Control Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceControlConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.models.SourceControlConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'sourceControlConfigurationName': self._serialize.url("source_control_configuration_name", source_control_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
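    # Usage sketch (client construction omitted; argument names follow this
    # operation group's signature, the 'client' variable is hypothetical):
    #
    #   config = await client.source_control_configurations.get(
    #       resource_group_name="my-rg",
    #       cluster_rp="Microsoft.ContainerService",
    #       cluster_resource_name="managedClusters",
    #       cluster_name="my-aks",
    #       source_control_configuration_name="my-config")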
async def create_or_update(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
source_control_configuration: "_models.SourceControlConfiguration",
**kwargs
) -> "_models.SourceControlConfiguration":
"""Create a new Kubernetes Source Control Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:param source_control_configuration: Properties necessary to Create KubernetesConfiguration.
:type source_control_configuration: ~azure.mgmt.kubernetesconfiguration.models.SourceControlConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceControlConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.models.SourceControlConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'sourceControlConfigurationName': self._serialize.url("source_control_configuration_name", source_control_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(source_control_configuration, 'SourceControlConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
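    # Illustrative only (not part of the generated client): a minimal sketch of
    # calling the coroutines above. The client class name and all argument
    # values here are assumptions, not taken from this file.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.kubernetesconfiguration.aio import SourceControlConfigurationClient
    #
    #   async def example():
    #       async with SourceControlConfigurationClient(
    #               DefaultAzureCredential(), "<subscription-id>") as client:
    #           config = await client.source_control_configurations.create_or_update(
    #               resource_group_name="my-rg",
    #               cluster_rp="Microsoft.Kubernetes",
    #               cluster_resource_name="connectedClusters",
    #               cluster_name="my-cluster",
    #               source_control_configuration_name="my-config",
    #               source_control_configuration=scc)  # a SourceControlConfiguration model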
async def _delete_initial(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'sourceControlConfigurationName': self._serialize.url("source_control_configuration_name", source_control_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""This will delete the YAML file used to set up the Source control configuration, thus stopping
future sync from the source repo.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
source_control_configuration_name=source_control_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'sourceControlConfigurationName': self._serialize.url("source_control_configuration_name", source_control_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
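    # Illustrative only: begin_delete returns an AsyncLROPoller, so a caller
    # awaits the poller's result() to block until the long-running delete has
    # finished (argument values below are placeholders):
    #
    #   poller = await client.source_control_configurations.begin_delete(
    #       resource_group_name="my-rg",
    #       cluster_rp="Microsoft.Kubernetes",
    #       cluster_resource_name="connectedClusters",
    #       cluster_name="my-cluster",
    #       source_control_configuration_name="my-config")
    #   await poller.result()  # returns once the configuration is deleted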
def list(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
**kwargs
) -> AsyncIterable["_models.SourceControlConfigurationList"]:
"""List all Source Control Configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or ~azure.mgmt.kubernetesconfiguration.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SourceControlConfigurationList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.models.SourceControlConfigurationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfigurationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterRp': self._serialize.url("cluster_rp", cluster_rp, 'str'),
'clusterResourceName': self._serialize.url("cluster_resource_name", cluster_resource_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SourceControlConfigurationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations'} # type: ignore
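    # Illustrative only: list returns an AsyncItemPaged that follows next_link
    # transparently, so callers iterate with `async for` (placeholder values):
    #
    #   async for config in client.source_control_configurations.list(
    #           "my-rg", "Microsoft.Kubernetes", "connectedClusters", "my-cluster"):
    #       print(config.name)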
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import view_base
from models.topology import TopologyWatcher
from xml.dom import minidom
LOG = logging.getLogger('ryu.gui')
class WebsocketView(view_base.ViewBase):
def __init__(self, ws):
super(WebsocketView, self).__init__()
self.ws = ws
self.address = None
self.watcher = None
def run(self):
while True:
msg = self.ws.receive()
if msg is not None:
                try:
                    msg = json.loads(msg)
                except ValueError:
                    LOG.debug("json parse error: %s", msg)
                    continue
self._recv_message(msg)
else:
if self.watcher:
self.watcher.stop()
break
self.ws.close()
LOG.info('Websocket: closed.')
return self.null_response()
def _send_message(self, msg_name, address, body=None):
message = {}
message['message'] = msg_name
message['host'], message['port'] = address.split(':')
message['body'] = body
LOG.debug("Websocket: send msg.\n%s", json.dumps(message, indent=2))
self.ws.send(json.dumps(message))
def _recv_message(self, msg):
LOG.debug('Websocket: recv msg.\n%s', json.dumps(msg, indent=2))
message = msg.get('message')
body = msg.get('body')
if message == 'rest_update':
self._watcher_start(body)
elif message == 'watching_switch_update':
self._watching_switch_update(body)
elif message == 'open_wireshark':
self._open_wireshark(body)
else:
return
def _watcher_start(self, body):
address = '%s:%s' % (body['host'], body['port'])
self.address = address
if self.watcher:
self.watcher.stop()
self.watcher = TopologyWatcher(
update_handler=self.update_handler,
rest_error_handler=self.rest_error_handler)
self.watcher.start(address)
def _watching_switch_update(self, body):
pass
def _open_wireshark(self, body):
interface = '%s' % (body['interface'])
import os
os.system("sudo wireshark -i "+interface+" -k &")
# called by watcher when topology update
def update_handler(self, address, delta):
if self.address != address:
            # the user is already watching another controller
return
LOG.debug(delta)
self._send_message('rest_connected', address)
self._send_del_links(address, delta.deleted)
self._send_del_ports(address, delta.deleted)
self._send_del_switches(address, delta.deleted)
self._send_add_switches(address, delta.added)
self._send_add_ports(address, delta.added)
self._send_add_links(address, delta.added)
def _send_add_switches(self, address, topo):
body = self._build_switches_message(topo)
if body:
self._send_message('add_switches', address, body)
def _send_del_switches(self, address, topo):
body = self._build_switches_message(topo)
if body:
self._send_message('del_switches', address, body)
def _build_switches_message(self, topo):
body = []
# Read switch positions from network.xml
        position = {}
xmldoc = None
try:
xmldoc = minidom.parse('network.xml')
itemlist = xmldoc.getElementsByTagName('node')
for s in itemlist:
n = s.attributes['id'].value
# Remove the N char at the beginning
n = int(n[1:])
x = s.getElementsByTagName('x')[0].firstChild.data
y = s.getElementsByTagName('y')[0].firstChild.data
position[n]=(int(float(x)), int(float(y)))
        except Exception as detail:
            print("Failure to parse network.xml: %s" % str(detail))
for s in topo['switches']:
position[int(float(s.dpid))]=(-1,-1)
# Nodes creation
for s in topo['switches']:
pos = {}
pos['x']=position[int(float(s.dpid))][0]
pos['y']=position[int(float(s.dpid))][1]
S = {'dpid': s.dpid, 'ports': {}, 'pos': pos}
for p in s.ports:
S['ports'][p.port_no] = p.to_dict()
body.append(S)
return body
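    # A hypothetical network.xml matching the parsing above: each <node> id is
    # "N" followed by the switch dpid, with <x>/<y> children for its position.
    #
    #   <network>
    #     <node id="N1"><x>120.0</x><y>80.0</y></node>
    #     <node id="N2"><x>300.0</x><y>160.0</y></node>
    #   </network>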
def _send_add_ports(self, address, topo):
body = self._build_ports_message(topo)
if body:
self._send_message('add_ports', address, body)
def _send_del_ports(self, address, topo):
body = self._build_ports_message(topo)
if body:
self._send_message('del_ports', address, body)
def _build_ports_message(self, topo):
        # send only ports that don't belong to newly added switches
ports = set(topo['ports'])
for s in topo['switches']:
ports -= set(s.ports)
body = []
for p in ports:
body.append(p.to_dict())
return body
def _send_add_links(self, address, topo):
body = self._build_links_message(topo)
if body:
self._send_message('add_links', address, body)
def _send_del_links(self, address, topo):
body = self._build_links_message(topo)
if body:
self._send_message('del_links', address, body)
def _build_links_message(self, topo):
body = []
for link in topo['links']:
# handle link as undirected
if link.src.dpid > link.dst.dpid:
continue
p1 = link.src.to_dict()
p2 = link.dst.to_dict()
L = {'p1': p1.copy(), 'p2': p2.copy()}
L['p1']['peer'] = p2.copy()
L['p2']['peer'] = p1.copy()
body.append(L)
return body
# called by watcher when rest api error
def rest_error_handler(self, address, e):
LOG.debug('REST API Error: %s', e)
self._send_message('rest_disconnected', address)
|
|
import os
from collections import defaultdict
from configparser import ConfigParser, _UNSET, NoSectionError, NoOptionError
from functools import partial
MY_DIR = os.path.dirname(__file__)
def absdir(path):
return os.path.abspath(os.path.join(MY_DIR, path))
# -------------------------------------------
# Subclass the config parser to be able to obtain
# options from the default config
# -------------------------------------------
def setpaths(conf, path):
secs = ['paths', 'files']
for sec in secs:
if sec in conf.sections():
for opt in conf[sec]:
v = conf[sec][opt]
conf.set(sec, opt, os.path.abspath(os.path.join(os.path.dirname(path), v)))
class PathRelativeConfigParser(ConfigParser):
def __init__(self, *args, path=None, **kwargs):
super().__init__(*args, **kwargs)
setpaths(self, path)
    def read(self, filenames, encoding=None):
        read_ok = super().read(filenames, encoding=encoding)
        if isinstance(filenames, str):
            setpaths(self, filenames)
        return read_ok
@classmethod
def load(cls, filename):
prcp = cls()
prcp.read(filename)
return prcp
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
if section not in self.sections():
return fallback
else:
return super().get(section, option, raw=raw, vars=vars, fallback=fallback)
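# A minimal usage sketch for PathRelativeConfigParser: options under [paths]
# and [files] are rewritten into absolute paths relative to the directory of
# the config file itself. The file name below is hypothetical.
#
#   defaults.ini:
#     [paths]
#     feat_dir = feats
#
#   prcp = PathRelativeConfigParser.load('defaults.ini')
#   prcp.get('paths', 'feat_dir')  # -> <absolute dir of defaults.ini>/feats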
# -------------------------------------------
# The following options specify the various
# folders used for holding temporary and
# debug files.
# -------------------------------------------
# The directory in which to place the human readable feature files.
def FEAT_DIR(obj):
return getattr(obj, 'feat_dir')
# Directory to the gold standard data for evaluation.
def GOLD_DIR(obj):
return getattr(obj, 'gold_dir')
# Directory in which to place output classified files
def OUT_DIR(obj):
return getattr(obj, 'classified_dir')
# Whether or not to output debugging information
def DEBUG_ON(obj):
return getattr(obj, 'debug_on')
# The directory in which to store the information about the classifier feature
# weights, and raw labels
def DEBUG_DIR(obj):
return getattr(obj, 'debug_dir')
# -------------------------------------------
# Path to various text files
# -------------------------------------------
# Large English language wordlist.
EN_WORDLIST = 'en_wordlist'
# List of gloss-line words extracted from ODIN-2.1
GLS_WORDLIST = 'gls_wordlist'
# List of meta line words extracted from ODIN-2.1
MET_WORDLIST = 'met_wordlist'
# List of language names
LNG_NAMES = 'lng_names'
thresh_dict = {}
def get_thresh(config, var):
global thresh_dict
if var not in thresh_dict:
thresh_dict[var] = config.getfloat('thresholds', var)
return thresh_dict.get(var)
def HIGH_OOV_THRESH(config): return get_thresh(config, 'high_oov')
def MED_OOV_THRESH(config): return get_thresh(config, 'med_oov')
def HIGH_ISCORE_THRESH(config): return get_thresh(config, 'high_iscore')
def MED_ISCORE_THRESH(config): return get_thresh(config, 'med_iscore')
def LOW_ISCORE_THRESH(config): return get_thresh(config, 'low_iscore')
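# The getters above expect a [thresholds] config section with float values,
# along these lines (the numbers are illustrative, not recommended settings):
#
#   [thresholds]
#   high_oov = 0.5
#   med_oov = 0.3
#   high_iscore = 0.5
#   med_iscore = 0.3
#   low_iscore = 0.1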
# -------------------------------------------
# Load the Wordlist if it is defined in the config.
# -------------------------------------------
class WordlistFile(set):
def __init__(self, path):
super().__init__()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
if line.strip():
self.add(line.split()[0])
USE_BI_LABELS = 'use_bi_labels'
# Some lines appear as combinations of labels, such as "L-G-T" for all
# three on a single line. If this is set to true, these types of
# combined labels are allowed. If set to false, only the first
# of the multiple labels will be used.
USE_MULTI_LABELS = 'use_multi_labels'
# "Flags" are additional information that is intended to be included in
# the information about the line, such as +AC (for Author Citation)
# or +LN (for Language Name). These are stripped out by default, as
# otherwise they would result in an explosion of labels.
STRIP_FLAGS = 'strip_flags'
# =============================================================================
# Feature selection.
#
# In this section, various features are defined and can be enabled or
# disabled by the user. Read the comments, as some definitions are constants
# and should not be edited.
# =============================================================================
# -------------------------------------------
# High-level features.
#
# Set these to True or False, depending
# on whether you want that feature set enabled
# or not.
# -------------------------------------------
# Use the freki-block based features
FREKI_FEATS_ENABLED = True
# Use the text-based features
TEXT_FEATS_ENABLED = True
# -------------------------------------------
# These three features control whether the
# features are included for the previous line,
# the line before that (prev_prev), or the next
# line.
# -------------------------------------------
true_vals = set(['t','true','1','on','enabled'])
def getbool(args, k):
val = args.get(k, False)
return str(val).lower() in true_vals
def USE_PREV_LINE(args):
return getbool(args, 'use_prev_line')
# return args.getboolean('featuresets', 'use_prev_line')
def USE_PREV_PREV_LINE(args):
return getbool(args, 'use_prev_prev_line')
# return args.getboolean('featuresets', 'use_prev_prev_line')
def USE_NEXT_LINE(args):
return getbool(args, 'use_next_line')
# return args.getboolean('featuresets', 'use_next_line')
# -------------------------------------------
# FEATURE CONSTANTS
#
# Associating a variable with the text string used in the config file.
# -------------------------------------------
F_IS_INDENTED = 'is_indented'
F_IS_FIRST_PAGE = 'is_first_page'
F_PREV_LINE_SAME_BLOCK = 'prev_line_same_block'
F_NEXT_LINE_SAME_BLOCK = 'next_line_same_block'
F_HAS_NONSTANDARD_FONT = 'has_nonstandard_font'
F_HAS_SMALLER_FONT = 'has_smaller_font'
F_HAS_LARGER_FONT = 'has_larger_font'
F_HIGH_ISCORE = 'f_high_iscore'
F_MED_ISCORE = 'f_med_iscore'
F_LOW_ISCORE = 'f_low_iscore'
# List of all the above
F_LIST = [F_IS_INDENTED, F_IS_FIRST_PAGE, F_PREV_LINE_SAME_BLOCK, F_NEXT_LINE_SAME_BLOCK, F_HAS_NONSTANDARD_FONT, F_HAS_SMALLER_FONT, F_HAS_LARGER_FONT, F_HIGH_ISCORE, F_MED_ISCORE, F_LOW_ISCORE]
T_PREV_TAG = 'prev_tag'
T_BASIC = 'words'
T_HAS_LANGNAME = 'has_langname'
T_HAS_GRAMS = 'has_grams'
T_HAS_PARENTHETICAL = 'has_parenthetical'
T_HAS_CITATION = 'has_citation'
T_HAS_ASTERISK = 'has_asterisk'
T_HAS_UNDERSCORE = 'has_underscore'
T_HAS_BRACKETING = 'has_bracketing'
T_HAS_QUOTATION = 'has_quotation'
T_HAS_NUMBERING = 'has_numbering'
T_HAS_LEADING_WHITESPACE = 'has_leading_whitespace'
T_HIGH_OOV_RATE = 'high_oov_rate'
T_MED_OOV_RATE = 'med_oov_rate'
T_HIGH_GLS_OOV_RATE = 'high_gls_oov'
T_HIGH_MET_OOV_RATE = 'high_met_oov'
T_MED_GLS_OOV_RATE = 'med_gls_oov'
T_HAS_JPN = 'has_jpn'
T_HAS_GRK = 'has_grk'
T_HAS_KOR = 'has_kor'
T_HAS_CYR = 'has_cyr'
T_HAS_ACC = 'has_acc_lat'
T_HAS_DIA = 'has_dia'
T_HAS_UNI = 'has_uni'
T_HAS_YEAR = 'has_year'
T_LIST = [T_BASIC, T_HAS_LANGNAME, T_HAS_GRAMS, T_HAS_PARENTHETICAL, T_HAS_CITATION, T_HAS_ASTERISK, T_HAS_UNDERSCORE, T_HAS_BRACKETING,
          T_HAS_QUOTATION, T_HAS_NUMBERING, T_HAS_LEADING_WHITESPACE, T_HIGH_OOV_RATE, T_MED_OOV_RATE,
          T_HIGH_GLS_OOV_RATE, T_MED_GLS_OOV_RATE, T_HIGH_MET_OOV_RATE,
          T_HAS_JPN, T_HAS_GRK, T_HAS_KOR, T_HAS_CYR, T_HAS_ACC, T_HAS_DIA, T_HAS_UNI, T_HAS_YEAR]
# =============================================================================
# EDIT THIS SECTION
# =============================================================================
# -------------------------------------------
# Now, to enable/disable a particular feature,
# just comment out the line the feature is
# contained on.
# -------------------------------------------
def enabled_feats(config: ConfigParser, section, featlist):
enabled = set([])
for feat in featlist:
if config.has_option(section, feat):
b = config.getboolean(section, feat)
if b:
enabled.add(feat)
return enabled
_enabled_freki_feats = None
_enabled_text_feats = None
def ENABLED_FREKI_FEATS(config: ConfigParser):
global _enabled_freki_feats
if _enabled_freki_feats is None:
_enabled_freki_feats = enabled_feats(config, 'freki_features', F_LIST)
return _enabled_freki_feats
def ENABLED_TEXT_FEATS(config: ConfigParser):
global _enabled_text_feats
if _enabled_text_feats is None:
_enabled_text_feats = enabled_feats(config, 'text_features', T_LIST)
return _enabled_text_feats
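# enabled_feats() only adds a feature when its option is present and true, so
# the corresponding config sections look like this (an illustrative snippet;
# valid option names are the strings in F_LIST and T_LIST above):
#
#   [freki_features]
#   is_indented = true
#   is_first_page = false
#
#   [text_features]
#   has_langname = true
#   has_year = true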
# =============================================================================
# Regular Expressions
#
# These are multiline expressions that were initially used for IGT detection.
#
# These are currently unused, but could be used to fire a feature for lines
# that fall within a span matched by one of these expressions.
# =============================================================================
REGEXES = '''
\s*(\()\d*\).*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\d*\)\s\w\..*\n
\s*.*\n
\s\[`'"].*\n
~
\s*(\(\d)*\)\s*\(.*\n
\s.*\n
\s*.*\n
\s\[`'"].*\n
~
\s*(\()\d*\).*\n
\s*\w\..*\n
\s*.*\n
\s\[`'"].*\n
~
\s*\w.\s*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*\w\)\s*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\w*\).*\n
\s*.*\n
\s*\[`'"].*\n
~
//added 02-03-2005
\s*\d.*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\d*\).*\n
.*\n
\s*.*\n
\s*\[`'"].*\n
~'''
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import threading
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
from oslo_serialization import jsonutils
from oslo_utils import importutils
import designate.context
import designate.exceptions
from designate import objects
profiler = importutils.try_import('osprofiler.profiler')
__all__ = [
'init',
'cleanup',
'set_defaults',
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
]
CONF = cfg.CONF
EXPECTED_EXCEPTION = threading.local()
NOTIFICATION_TRANSPORT = None
NOTIFIER = None
TRANSPORT = None
# NOTE: Additional entries to designate.exceptions goes here.
ALLOWED_EXMODS = [
designate.exceptions.__name__,
'designate.backend.impl_dynect'
]
EXTRA_EXMODS = []
def init(conf):
global TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT
exmods = get_allowed_exmods()
TRANSPORT = create_transport(get_transport_url())
NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
conf, allowed_remote_exmods=exmods)
serializer = RequestContextSerializer(JsonPayloadSerializer())
NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
serializer=serializer)
def initialized():
return None not in [TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT]
def cleanup():
global TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if NOTIFICATION_TRANSPORT is None:
raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
if NOTIFIER is None:
raise AssertionError("'NOTIFIER' must not be None")
TRANSPORT.cleanup()
NOTIFICATION_TRANSPORT.cleanup()
TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None
def set_defaults(control_exchange):
messaging.set_transport_defaults(control_exchange)
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS + CONF.allowed_remote_exmods
class JsonPayloadSerializer(messaging.NoOpSerializer):
@staticmethod
def serialize_entity(context, entity):
return jsonutils.to_primitive(entity, convert_instances=True)
class DesignateObjectSerializer(messaging.NoOpSerializer):
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
        :param context: Request context
        :param action_fn: Action to take on each item in values
        :param values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if iterable == set:
# NOTE: A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't
# send them over RPC anyway.
iterable = tuple
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif hasattr(entity, 'to_primitive') and callable(entity.to_primitive):
entity = entity.to_primitive()
return jsonutils.to_primitive(entity, convert_instances=True)
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'designate_object.name' in entity:
entity = objects.DesignateObject.from_primitive(entity)
elif isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
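    # Illustrative round trip (names `ctx` and `zone` are placeholders): a
    # DesignateObject is flattened to a primitive dict on the way out and
    # rebuilt from the 'designate_object.name' marker on the way in:
    #
    #   s = DesignateObjectSerializer()
    #   primitive = s.serialize_entity(ctx, zone)        # plain dict
    #   restored = s.deserialize_entity(ctx, primitive)  # DesignateObject again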
class RequestContextSerializer(messaging.Serializer):
def __init__(self, base):
self._base = base
def serialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.serialize_entity(context, entity)
def deserialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.deserialize_entity(context, entity)
def serialize_context(self, context):
_context = context.to_dict()
if profiler is not None:
prof = profiler.get()
if prof is not None:
trace_info = {
"hmac_key": prof.hmac_key,
"base_id": prof.get_base_id(),
"parent_id": prof.get_id()
}
_context.update({"trace_info": trace_info})
return _context
def deserialize_context(self, context):
trace_info = context.pop("trace_info", None)
if trace_info is not None:
if profiler is not None:
profiler.init(**trace_info)
return designate.context.DesignateContext.from_dict(context)
def get_transport_url(url_str=None):
return messaging.TransportURL.parse(CONF, url_str)
def get_client(target, version_cap=None, serializer=None):
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(
TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer
)
def get_server(target, endpoints, serializer=None):
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
access_policy = rpc_dispatcher.DefaultRPCAccessPolicy
return messaging.get_rpc_server(
TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer,
access_policy=access_policy
)
def get_notification_listener(targets, endpoints, serializer=None, pool=None):
if NOTIFICATION_TRANSPORT is None:
raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
if serializer is None:
serializer = JsonPayloadSerializer()
return messaging.get_notification_listener(
NOTIFICATION_TRANSPORT,
targets,
endpoints,
executor='eventlet',
pool=pool,
serializer=serializer
)
def get_notifier(service=None, host=None, publisher_id=None):
if NOTIFIER is None:
raise AssertionError("'NOTIFIER' must not be None")
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
def create_transport(url):
exmods = get_allowed_exmods()
return messaging.get_rpc_transport(CONF,
url=url,
allowed_remote_exmods=exmods)
def expected_exceptions():
def outer(f):
@functools.wraps(f)
def exception_wrapper(self, *args, **kwargs):
if not hasattr(EXPECTED_EXCEPTION, 'depth'):
EXPECTED_EXCEPTION.depth = 0
EXPECTED_EXCEPTION.depth += 1
# We only want to wrap the first function wrapped.
if EXPECTED_EXCEPTION.depth > 1:
return f(self, *args, **kwargs)
try:
return f(self, *args, **kwargs)
except designate.exceptions.DesignateException as e:
if e.expected:
raise rpc_dispatcher.ExpectedException()
raise
finally:
EXPECTED_EXCEPTION.depth = 0
return exception_wrapper
return outer
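# A minimal sketch of applying expected_exceptions(): decorate an RPC endpoint
# method so that DesignateExceptions marked as expected are re-raised as
# oslo.messaging ExpectedException instead of surfacing as server errors.
# The service class below is hypothetical:
#
#   class CentralService(object):
#       @expected_exceptions()
#       def get_zone(self, context, zone_id):
#           return self.storage.get_zone(context, zone_id)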
|
|
#!/var/www/django/crm/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver:
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
            dup = self.stack[0].copy()
        else:
            dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
        mode = self.do_pop()
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))  # draft() configures the loader in place
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
        image_filter = getattr(ImageFilter, self.do_pop().upper())
        image = self.do_pop()
        self.push(image.filter(image_filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
        from PIL import ImageChops
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
        self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
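    # Illustrative programmatic use (the file name is a placeholder): tokens
    # are evaluated in reverse Polish order, so `open` runs first here:
    #
    #   driver = PILDriver()
    #   driver.execute("show rotate 30 open test.png".split())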
if __name__ == '__main__':
import sys
try:
import readline
except ImportError:
pass # not available on all platforms
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
"""The reconciler for autodeployments.
The reconciler is responsible for launching K8s jobs to deploy
Kubeflow as needed and garbage collecting old instances
This is the legacy version, which:
1. Doesn't support blueprints (i.e. uses Deployment Manager)
2. Uses K8s jobs, not Tekton PipelineRuns.
"""
import collections
import datetime
from dateutil import parser as date_parser
import fire
import logging
import os
import re
import time
import uuid
import yaml
from kubeflow.testing.auto_deploy import util as auto_deploy_util
from kubeflow.testing import delete_kf_instance
from kubeflow.testing import gcp_util
from kubeflow.testing import git_repo_manager
from kubeflow.testing import kf_logging
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubernetes.client import rest
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# The minimum time to wait before triggering another deployment.
MIN_TIME_BETWEEN_DEPLOYMENTS = datetime.timedelta(minutes=20)
# The maximum number of active deployments
# TODO(jlewi): Maybe bump this later on.
MAX_ACTIVE_DEPLOYMENTS = 10
KFDEF_URL_TUPLE = collections.namedtuple("KfDefUrlTuple",
("host", "owner", "repo", "branch",
"path"))
KFDEF_PATTERN = re.compile("https://([^/]*)/([^/]*)/([^/]*)/([^/]*)/(.*)")
# Name of various keys in the config file
KFDEF_KEY = "kfDefUrl"
KFCTL_KEY = "kfctlUrl"
VERSIONS_KEY = "versions"
# Durations related to GC
# Minimum amount of time to leave a deployment up before it is eligble for
# deletion. Try to avoid deleting clusters from underneath people and
# tests. We need to leave enough time for any tests running on the cluster
# to finish. Assume 1 hour to setup KF, 1 hour to run tests, 1 hour of buffer
MIN_LIFETIME = datetime.timedelta(hours=3)
# How old the next most recent deployment must be before a given deployment
# is deleted; i.e. if x is older than y, then before we delete x we want
# y to be old enough to assume that people have moved over to y rather
# than still using x.
# We want to periodically redeploy even if the version hasn't changed
PERIODIC_REDEPLOY = datetime.timedelta(hours=12)
def _parse_kfdef_url(url):
m = KFDEF_PATTERN.match(url)
if not m:
raise ValueError(f"url {url} doesn't match pattern {m.pattern}")
return KFDEF_URL_TUPLE(m.group(1), m.group(2), m.group(3), m.group(4),
m.group(5))
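# For example (a hypothetical URL), _parse_kfdef_url splits a raw GitHub URL
# into the named fields:
#
#   _parse_kfdef_url("https://raw.githubusercontent.com/kubeflow/manifests"
#                    "/master/kfdef/kfctl_gcp_iap.yaml")
#   -> KfDefUrlTuple(host="raw.githubusercontent.com", owner="kubeflow",
#                    repo="manifests", branch="master",
#                    path="kfdef/kfctl_gcp_iap.yaml")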
def _kfdef_url_to_clone_url(url):
"""Convert the KFDef URL into the repo to clone"""
# TODO(jlewi): For other hosts how would we determine the repo to clone?
# Maybe make it explicitly set in the config file
if url.host != "raw.githubusercontent.com":
raise ValueError("The code currently assumes KFDef are hosted on "
"raw.githubusercontent.com")
return f"https://github.com/{url.owner}/{url.repo}.git"
def _job_is_running(j):
"""Return true if the specified K8s job is still running.
Args:
j: A K8s job object
"""
conditions = j.status.conditions
if not conditions:
return True
for c in conditions[::-1]:
if c.type.lower() in ["succeeded", "failed", "complete"]:
if c.status.lower() in ["true"]:
return False
return True
class Reconciler: # pylint: disable=too-many-instance-attributes
def __init__(self, manifests_repo=None, config=None, job_template_path=None):
"""Construct a reconciler
Args:
manifests_repo: A GitRepoManager object representing the
kubeflow/manifests repo
      config: A dictionary containing the reconciler configuration
        (e.g. the versions list, project, and zone).
job_template_path: Path to the YAML file for the K8s job to
launch.
"""
self.config = config
# This is a map:
# Kubeflow version -> List of deployments
self._deployments = None
self._manifests_repo = manifests_repo
self._k8s_client = None
self._job_template_path = job_template_path
# Logging context. A dictionary of extra labels for logs
self._log_context = {}
# If provided this should be a multiprocessing queue on which to
# push info about deployments
self._queue = None
# Directory where YAML files listing deployments should be written.
# This is used to make it available to other processes
self._deployments_dir = None
self._manifests_client = None
@staticmethod
def from_config_file(config_path, job_template_path, deployments_dir,
local_dir=None):
"""Construct a reconciler from the config path.
Args:
config_path: Path to configuration
job_template_path: Path to the YAML file containing a K8s job to
launch to do the deployments.
deployments_dir: Path where YAML should be dumped describing deployments
      local_dir: (Optional) Path where repositories should be checked out.
"""
with open(config_path) as f:
      config = yaml.safe_load(f)
kfdef_url = _parse_kfdef_url(config[VERSIONS_KEY][0][KFDEF_KEY])
# Ensure there is a single repository; currently the code only handles
# the case where all deployments are from a single URL
for d in config[VERSIONS_KEY][1:]:
new_url = _parse_kfdef_url(d[KFDEF_KEY])
if (new_url.host != kfdef_url.host or new_url.owner != kfdef_url.owner
or new_url.repo != kfdef_url.repo):
raise ValueError(f"All deployments must use the same repo for the KFDef")
url = _kfdef_url_to_clone_url(kfdef_url)
manifests_repo = git_repo_manager.GitRepoManager(url=url,
local_dir=local_dir)
reconciler = Reconciler(config=config, job_template_path=job_template_path,
manifests_repo=manifests_repo)
reconciler._deployments_dir = deployments_dir # pylint: disable=protected-access
logging.info(f"Using deployments directory={reconciler._deployments_dir}") # pylint: disable=protected-access
service_account_path = "/var/run/secrets/kubernetes.io"
if os.path.exists("/var/run/secrets/kubernetes.io"):
logging.info(f"{service_account_path} exists; loading in cluster config")
k8s_config.load_incluster_config()
else:
logging.info(f"{service_account_path} doesn't exists; "
"loading kube config file")
k8s_config.load_kube_config(persist_config=False)
reconciler._k8s_client = k8s_client.ApiClient() # pylint: disable=protected-access
return reconciler
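  # A sketch of the config shape from_config_file expects; the keys correspond
  # to VERSIONS_KEY/KFDEF_KEY/KFCTL_KEY above plus the project/zone/name fields
  # used elsewhere in this class (all values are illustrative):
  #
  #   project: my-gcp-project
  #   zone: us-central1-a
  #   versions:
  #     - name: master
  #       kfDefUrl: https://raw.githubusercontent.com/kubeflow/manifests/master/kfdef/kf.yaml
  #       kfctlUrl: https://example.com/kfctl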
# TODO(jlewi): This was a failed attempt to create a utility function
# to always log the context. It turns out to lead to inconvenient code
# because we need to set the level. I think we want to define
# self.logging which has functions info, debug, warning, error etc...
def _log(self, level, message, *args, **kwargs):
if "extra" not in kwargs:
kwargs["extra"] = {}
kwargs["extra"].update(self._log_context)
logging.log(level, message, *args, **kwargs)
def _save_deployments(self):
if not self._deployments_dir:
logging.info("No deployments directory provided; not persisting "
"deployments")
return
# Write to the deployments to a file in order to make them
# available to all the flask threads and processes
suffix = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
if not os.path.exists(self._deployments_dir):
os.makedirs(self._deployments_dir)
d = {}
for k, v in self._deployments.items():
d[k] = [i.to_dict() for i in v]
path = os.path.join(self._deployments_dir, f"deployments.{suffix}.yaml")
logging.info(f"Writing deployments to {path}")
with open(path, "w") as hf:
yaml.dump(d, hf)
# TODO(jlewi): We should GC old versions of the file.
def _get_deployment_zone(self, deployment_name, manifest_name):
"""Get the zone for a deployment.
Args:
deployment_name: Name of the deployment
manifest_name: Name of the manifest
Returns:
zone:
"""
    if not self._manifests_client:
      credentials = GoogleCredentials.get_application_default()
      dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
      self._manifests_client = dm.manifests()
    manifests = self._manifests_client
m = manifests.get(project=self.config['project'],
deployment=deployment_name,
manifest=manifest_name).execute()
dm_config = yaml.load(m["config"]["content"])
zone = dm_config["resources"][0]["properties"]["zone"]
return zone
def _get_deployments(self, deployments=None):
"""Build a map of all deployments
Args:
deployments: (Optional) Iterator over GCP deployments.
"""
logging.info("Building map of auto deployments")
self._deployments = collections.defaultdict(lambda: [])
if not deployments:
deployments = gcp_util.deployments_iterator(self.config["project"])
for d in deployments:
is_auto_deploy = False
# Use labels to identify auto-deployed instances
labels = {}
for label_pair in d.get("labels", []):
# Newer clusters
if label_pair["key"] == "auto-deploy":
is_auto_deploy = True
# Older clusters
if (label_pair["key"] == "purpose" and
label_pair["value"] == "kf-test-cluster"):
is_auto_deploy = True
labels[label_pair["key"]] = label_pair["value"]
if not is_auto_deploy:
logging.info("Skipping deployment %s; its missing the label "
"auto-deploy", d["name"])
continue
if d.get("operation", {}).get("operationType") == "delete":
logging.info(f"Skipping deployment {d['name']} it is being deleted.")
if auto_deploy_util.is_storage_deployment(d["name"]):
logging.info(f"Skipping deployment {d['name']}; it is storage")
continue
version_name = labels.get(auto_deploy_util.AUTO_NAME_LABEL, "unknown")
if not "manifest" in d:
# Since we don't know the manifest we can't get the zone.
# It looks like the manfiest might also be stored in the operation.
# However, it looks like the reason the manifest isn't there
# is because the deployment failed. So we can just set zone to the
# empty string. I think zone only matters for getting cluster
# credentials but since the deployment failed that shouldn't matter.
logging.error(f"Deployment {d['name']} doesn't "
"have a manifest. This typically indicates the "
"deployment failed")
zone = ""
else:
dm_manifest_name = d["manifest"].split("/")[-1]
zone = self._get_deployment_zone(d["name"], dm_manifest_name)
context = {
"deployment_name" : d['name'],
"version_name" : version_name,
}
manifests_branch = labels.get(auto_deploy_util.BRANCH_LABEL, "unknown")
create_time = date_parser.parse(d.get("insertTime"))
deployment = auto_deploy_util.AutoDeployment(manifests_branch=manifests_branch,
create_time=create_time,
deployment_name=d["name"],
labels=labels)
deployment.zone = zone
logging.info(f"Found auto deployment={d['name']} for version={version_name}",
extra=context)
self._deployments[version_name] = (self._deployments[version_name] +
[deployment])
# Sort the values by timestamp
branches = self._deployments.keys()
for b in branches:
self._deployments[b] = sorted(self._deployments[b],
key=lambda x: x.create_time)
self._save_deployments()
def _launch_job(self, config, commit):
"""Launch a K8s job to deploy Kubeflow.
Args:
config: The deployment config; contains the URL of the repo.
commit: The commit to launch from.
"""
with open(self._job_template_path) as f:
job_config = yaml.safe_load(f)
job_config["metadata"]["generateName"] = f"auto-deploy-{config['name']}-"
if os.getenv("JOB_NAMESPACE"):
namespace = os.getenv("JOB_NAMESPACE")
logging.info(f"Setting job namespace to {namespace}",
extra=self._log_context)
job_config["metadata"]["namespace"] = namespace
namespace = job_config["metadata"]["namespace"]
# Check if there is already a running job
label_filter = {
auto_deploy_util.MANIFESTS_COMMIT_LABEL: commit,
}
items = [f"{k}={v}" for k, v in label_filter.items()]
selector = ",".join(items)
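# For example, if MANIFESTS_COMMIT_LABEL were "auto-deploy-commit" and
# commit were "abc123" (both values illustrative), the selector would be
# "auto-deploy-commit=abc123".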
# TODO(jlewi): We should switch to using Tekton.
batch_api = k8s_client.BatchV1Api(self._k8s_client)
jobs = batch_api.list_namespaced_job(namespace, label_selector=selector)
if jobs.items:
for j in jobs.items:
logging.info(f"Found job {j.metadata.name}", extra=self._log_context)
if _job_is_running(j):
logging.info(
f"Job {j.metadata.name} is still running; not launching "
f"a new job",
extra=self._log_context)
return
if os.getenv("JOB_NAMESPACE"):
namespace = os.getenv("JOB_NAMESPACE")
logging.info(f"Setting job namespace to {namespace}",
extra=self._log_context)
job_config["metadata"]["namespace"] = namespace
kfdef_url = _parse_kfdef_url(config[KFDEF_KEY])
# Kubeflow deployment name
# We need to keep the name short to avoid hitting limits with certificates.
uid = datetime.datetime.now().strftime("%m%d") + "-"
uid = uid + uuid.uuid4().hex[0:3]
kf_name = f"kf-{config['name']}-{uid}"
labels = {auto_deploy_util.MANIFESTS_COMMIT_LABEL: commit,
auto_deploy_util.BRANCH_LABEL: kfdef_url.branch,
auto_deploy_util.AUTO_NAME_LABEL: config["name"],
"kf-name": kf_name,
}
# Make label value safe
for k, _ in labels.items():
labels[k] = labels[k].replace(".", "-")
job_config["metadata"]["labels"].update(labels)
label_pairs = [f"{k}={v}" for k, v in labels.items()]
labels_value = ",".join(label_pairs)
commit_url = (f"https://{kfdef_url.host}/{kfdef_url.owner}/"
f"{kfdef_url.repo}/{commit}/{kfdef_url.path}")
job_config["spec"]["template"]["spec"]["containers"][0]["command"] = [
"python",
"-m",
"kubeflow.testing.create_unique_kf_instance",
"--apps_dir=/src/apps",
# TODO(jlewi): Should we optionally support building kfctl?
"--kfctl_path=" + config[KFCTL_KEY],
"--kubeflow_repo=",
f"--name=" + kf_name,
f"--project={self.config['project']}",
f"--zone={self.config['zone']}",
"--kfctl_config=" + commit_url,
# Labels to attach so the resulting deployment can be identified
f"--labels={labels_value}",
# Use self-signed certificates; otherwise we will have problems
"--use_self_cert",
]
namespace = job_config["metadata"]["namespace"]
# TODO(jlewi): Handle errors
try:
job = batch_api.create_namespaced_job(namespace, job_config)
full_name = f"{namespace}.{job.metadata.name}"
logging.info(f"Submitted job {full_name}",
extra=self._log_context)
except rest.ApiException as e:
logging.error(f"Could not submit Kubernetes job for deployment {kf_name}"
f":\n{e}", extra=self._log_context)
def _gc_deployments(self):
"""Delete old deployments"""
kf_deleter = delete_kf_instance.KFDeleter()
for name, deployments in self._deployments.items():
self._log_context = {
"version_name": name,
}
logging.info(f"Version {name} has {len(deployments)} active deployments",
extra=self._log_context)
# We want at least one deployment for each version
if len(deployments) <= 1:
continue
# deployments should already be sorted by create time.
# we always want to keep at least 1 deployment so we never delete
# the last deployment
for index, d in enumerate(deployments[:-1]):
now = datetime.datetime.now(d.create_time.tzinfo)
age = now - d.create_time
if age < MIN_LIFETIME:
logging.info(f"Deployment {d.deployment_name} not eligible for deletion; "
f"It is only {age} old", extra=self._log_context)
# Since all the other deployments will be younger none of them
# will be eligible
break
# Make sure the next deployment is at least older than the GRACE_PERIOD
# before deleting this one.
next_oldest = deployments[index + 1]
now = datetime.datetime.now(next_oldest.create_time.tzinfo)
next_age = now - next_oldest.create_time
if next_age < GRACE_PERIOD:
logging.info(f"Deployment {d.deployment_name} not eligible for deletion; "
f"The next oldest deployment "
f"{next_oldest.deployment_name} is only "
f"{next_age}(HH:MM:SS) old",
extra=self._log_context)
break
context = {
"deployment_name": d.deployment_name
}
context.update(self._log_context)
logging.info(f"Deleting deployment {d.deployment_name}; age={age} "
f"create_time={d.create_time}", extra=context)
kf_deleter.delete_kf(self.config["project"], d.deployment_name)
def _reconcile(self):
# Get the deployments.
self._get_deployments()
# Compute the current number of deployments
active_deployments = 0
for _, i in self._deployments.items():
active_deployments += len(i)
# Sync the repositories because we use this to find the latest changes.
self._manifests_repo.fetch()
# TODO(jlewi): Stop hardcoding the branch names we should pass this
# in via some sort of config
for config in self.config[VERSIONS_KEY]:
version_name = config["name"]
logging.info(f"Processing version={version_name}")
kf_def_url = _parse_kfdef_url(config[KFDEF_KEY])
self._log_context = {
"version_name": config["name"],
"branch": kf_def_url.branch,
}
self._log(logging.INFO, f"Reconciling deployment {config['name']}")
branch = kf_def_url.branch
full_branch = f"{self._manifests_repo.remote_name}/{branch}"
last_commit = self._manifests_repo.last_commit(full_branch, "")
logging.info(f"Last commit to version={version_name} "
"commit={last_commit}", extra=self._log_context)
# Get the commit of the last deployment for this version
if self._deployments[version_name]:
last_deployed = self._deployments[version_name][-1]
last_deployed_commit = last_deployed.labels.get(
auto_deploy_util.MANIFESTS_COMMIT_LABEL)
now = datetime.datetime.now(tz=last_deployed.create_time.tzinfo)
time_since_last_deploy = now - last_deployed.create_time
logging.info(f"version_name={version_name} "
f"last_commit={last_commit} most recent "
f"deployment is {last_deployed.deployment_name} "
f"at commit={last_deployed_commit} "
f"age={time_since_last_deploy}",
extra=self._log_context)
if (last_deployed_commit == last_commit and
time_since_last_deploy < PERIODIC_REDEPLOY):
logging.info(f"version_name={version_name} no sync needed",
extra=self._log_context)
continue
else:
logging.info(f"version_name={version_name} sync needed",
extra=self._log_context)
if time_since_last_deploy < MIN_TIME_BETWEEN_DEPLOYMENTS:
minutes = time_since_last_deploy.total_seconds() / 60.0
logging.info(f"version_name={version_name} can't start a new deployment "
f"because deployment for {last_deployed.deployment_name }"
f"is only {minutes} minutes old", extra=self._log_context)
continue
else:
logging.info(f"version_name={version_name} has no active deployments",
extra=self._log_context)
if active_deployments >= MAX_ACTIVE_DEPLOYMENTS:
logging.info(f"version_name={version_name} can't start a new deployment "
f"there are currently {active_deployments} active "
f"deployments already.", extra=self._log_context)
continue
self._launch_job(config, last_commit)
# TODO(jlewi): We should GC the older deployments. We should have
# some min TTL so we don't delete clusters from underneath people.
# We should then GC any clusters as long as there as a newer cluster
# already available. We should require that the new cluster is at least
# 30 minutes old so that we know its ready.
self._gc_deployments()
def run(self, period=datetime.timedelta(minutes=5)):
"""Continuously reconcile."""
# Ensure we can get GCP credentials
if not gcp_util.get_gcp_credentials():
raise RuntimeError("Could not get GCP application default credentials")
while True:
self._reconcile()
logging.info(f"Wait {period}(HH:MM:SS) before reconciling; ")
time.sleep(period.total_seconds())
class CLI:
@staticmethod
def run(config_path, job_template_path, deployments_dir, local_dir=None):
reconciler = Reconciler.from_config_file(config_path, job_template_path,
deployments_dir=deployments_dir,
local_dir=local_dir)
reconciler.run()
if __name__ == "__main__":
# Emit logs in json format. This way we can do structured logging
# and we can query extra fields easily in stackdriver and bigquery.
json_handler = logging.StreamHandler()
json_handler.setFormatter(kf_logging.CustomisedJSONFormatter())
logger = logging.getLogger()
logger.addHandler(json_handler)
logger.setLevel(logging.INFO)
fire.Fire(CLI)
|
|
import json
import requests
import time
import auth
import util
from instance import Instance
from exceptions import AlaudaServerError
MAX_RETRY_NUM = 10
INSTANCE_SIZES = ['XXS', 'XS', 'S', 'M', 'L', 'XL']
class Service(object):
def __init__(self, name, image_name, image_tag, target_num_instances=1, instance_size='XS', run_command='',
instance_ports=None, instance_envvars=None, volumes=None, links=None, details='', namespace=None,
scaling_mode='MANUAL', autoscaling_config=None, custom_domain_name='', region_name=None):
self.name = name
self.image_name = image_name
self.image_tag = image_tag
self.target_num_instances = target_num_instances
self.instance_size = instance_size
if instance_size not in INSTANCE_SIZES:
raise AlaudaServerError(400, 'instance_size must be one of {}'.format(INSTANCE_SIZES))
self.run_command = run_command
# Use fresh containers instead of shared mutable default arguments
self.instance_envvars = instance_envvars if instance_envvars is not None else {}
self.instance_ports = instance_ports if instance_ports is not None else []
self.volumes = volumes if volumes is not None else []
self.links = links if links is not None else []
self.details = details
self.custom_domain_name = custom_domain_name
self.api_endpoint, self.token, self.username = auth.load_token()
self.headers = auth.build_headers(self.token)
self.namespace = namespace or self.username
self.scaling_mode = scaling_mode
self.autoscaling_config = autoscaling_config if autoscaling_config is not None else {}
self.region_name = region_name
def _update_envvars_with_links(self, instance_envvars, links, namespace=None):
linked_to = {}
if links is not None:
for link in links:
service_name = link[0]
alias = link[1]
linked_to[service_name] = alias
retry_num = 0
while retry_num < MAX_RETRY_NUM:
linked_service = Service.fetch(service_name, namespace)
linked_service_data = json.loads(linked_service.details)
linked_service_ports = linked_service_data['instance_ports']
if len(linked_service_ports) == 0:
break
# linked_service_envvars = json.loads(linked_service_data['instance_envvars'])
# linked_service_addr = linked_service_envvars['__DEFAULT_DOMAIN_NAME__']
key = '{0}_PORT'.format(alias).upper()
for port in linked_service_ports:
service_port = port.get('service_port')
if service_port is None:
retry_num = retry_num + 1
time.sleep(1)
break
retry_num = MAX_RETRY_NUM + 1
url = '{0}://{1}:{2}'.format(port['protocol'], port['default_domain'], service_port)
if key not in instance_envvars.keys():
instance_envvars[key] = url
pattern = '{0}_PORT_{1}_{2}'.format(alias, port['container_port'], port['protocol']).upper()
instance_envvars[pattern] = url
instance_envvars[pattern + '_ADDR'] = port['default_domain']
instance_envvars[pattern + '_PORT'] = str(service_port)
instance_envvars[pattern + '_PROTO'] = port['protocol']
if retry_num == MAX_RETRY_NUM:
raise AlaudaServerError(500, 'Timed out waiting for {} to acquire service port'.format(service_name))
return linked_to
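# For illustration, a linked service aliased "db" whose port entry is
# {'protocol': 'tcp', 'container_port': 5432, 'service_port': 31432,
#  'default_domain': 'db.example.io'} (made-up values) would yield the
# Docker-style variables:
#   DB_PORT                = tcp://db.example.io:31432
#   DB_PORT_5432_TCP       = tcp://db.example.io:31432
#   DB_PORT_5432_TCP_ADDR  = db.example.io
#   DB_PORT_5432_TCP_PORT  = 31432
#   DB_PORT_5432_TCP_PROTO = tcp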
def _create_remote(self, target_state):
linked_to = self._update_envvars_with_links(self.instance_envvars, self.links, self.namespace)
util.expand_environment(self.instance_envvars)
url = self.api_endpoint + 'services/{}/'.format(self.namespace)
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
"image_name": self.image_name,
"image_tag": self.image_tag,
"instance_size": self.instance_size,
"scaling_mode": "MANUAL",
"target_state": target_state,
"run_command": self.run_command,
"instance_envvars": self.instance_envvars,
"instance_ports": self.instance_ports,
'linked_to_apps': linked_to,
"volumes": self.volumes,
'scaling_mode': self.scaling_mode,
'autoscaling_config': self.autoscaling_config,
'custom_domain_name': self.custom_domain_name
}
if self.region_name:
payload['region_name'] = self.region_name
r = requests.post(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
@classmethod
def fetch(cls, name, namespace=None):
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/'.format(namespace or username) + name
headers = auth.build_headers(token)
r = requests.get(url, headers=headers)
util.check_response(r)
data = json.loads(r.text)
service = cls(name=data['service_name'],
image_name=data['image_name'],
image_tag=data['image_tag'],
target_num_instances=data['target_num_instances'],
instance_size=data['instance_size'],
details=r.text,
namespace=data['namespace'])
return service
@classmethod
def list(cls, namespace, page):
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/?page={}'.format(namespace or username, page)
headers = auth.build_headers(token)
r = requests.get(url, headers=headers)
util.check_response(r)
service_list = []
services = json.loads(r.text)
services = services.get('results', [])
for data in services:
try:
service = Service.fetch(data['service_name'], namespace)
service_list.append(service)
except AlaudaServerError:
continue
return service_list
@classmethod
def remove(cls, name, namespace=None):
print '[alauda] Removing service "{}"'.format(name)
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/'.format(namespace or username) + name
headers = auth.build_headers(token)
try:
r = requests.delete(url, headers=headers)
util.check_response(r)
except AlaudaServerError as ex:
if ex.status_code == 404:
print '[alauda] Service "{}" does not exist'.format(name)
else:
raise ex
def create(self):
print '[alauda] Creating service "{}"'.format(self.name)
self._create_remote('STOPPED')
def run(self):
print '[alauda] Creating and starting service "{}"'.format(self.name)
self._create_remote('STARTED')
def inspect(self):
if not self.details:
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
r = requests.get(url, headers=self.headers)
util.check_response(r)
self.details = r.text
return self.details
def start(self):
print '[alauda] Starting service "{}"'.format(self.name)
self.target_state = 'STARTED'
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name + '/start/'
r = requests.put(url, headers=self.headers)
util.check_response(r)
def stop(self):
print '[alauda] Stopping service "{}"'.format(self.name)
self.target_state = 'STOPPED'
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name + '/stop/'
r = requests.put(url, headers=self.headers)
util.check_response(r)
def scale(self, target_num_instances):
self.target_num_instances = target_num_instances
print '[alauda] Scaling service: {0} -> {1}'.format(self.name, self.target_num_instances)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def enable_autoscaling(self, autoscaling_config):
print '[alauda] Enabling auto-scaling for {0}'.format(self.name)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"scaling_mode": 'AUTO',
"autoscaling_config": autoscaling_config,
'app_name': self.name
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def disable_autoscaling(self, target_num_instances):
if target_num_instances is not None:
self.target_num_instances = target_num_instances
print '[alauda] Disabling auto-scaling for {0}. Target number of instances: {1}'.format(self.name, self.target_num_instances)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
'scaling_mode': 'MANUAL'
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def logs(self, start_time, end_time):
start, end = util.parse_time(start_time, end_time)
url = self.api_endpoint + 'services/{0}/{1}/logs?start_time={2}&end_time={3}'.format(self.namespace, self.name, start, end)
r = requests.get(url, headers=self.headers)
util.check_response(r)
return r.text
def get_run_command(self):
data = json.loads(self.details)
run_command = data['run_command']
if not run_command:
run_command = ' '
return run_command
def get_state(self):
data = json.loads(self.details)
return data.get('current_status')
def get_ports(self):
ports = ''
data = json.loads(self.details)
if not data['instance_ports']:
return ' '
for port in data['instance_ports']:
instance_envvars = json.loads(data['instance_envvars'])
ports = ports + '{0}:{1}->{2}/{3}, '.format(instance_envvars['__DEFAULT_DOMAIN_NAME__'],
port.get('service_port', ''),
port['container_port'],
port['protocol'])
return ports[:len(ports) - 2]
def get_instance(self, id):
url = self.api_endpoint + 'services/{0}/{1}/instances/{2}'.format(self.namespace, self.name, id)
r = requests.get(url, headers=self.headers)
util.check_response(r)
data = json.loads(r.text)
instance = Instance(service=self, uuid=data['uuid'], details=r.text)
return instance
def list_instances(self):
url = self.api_endpoint + 'services/{0}/{1}/instances/'.format(self.namespace, self.name)
r = requests.get(url, headers=self.headers)
util.check_response(r)
data = json.loads(r.text)
instance_list = []
for instance in data:
instance = Instance(service=self, uuid=instance['uuid'], details=json.dumps(instance))
instance_list.append(instance)
return instance_list
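# Hedged usage sketch (service name, image and port values are made up;
# assumes auth.load_token() has credentials available):
#
#   svc = Service(name='web', image_name='library/nginx', image_tag='latest',
#                 instance_ports=[{'container_port': 80, 'protocol': 'tcp'}])
#   svc.run()      # create and start the service
#   svc.scale(3)   # scale out to 3 instances
#   svc.inspect()  # fetch details from the API
#   print svc.get_state()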
|
|
from datetime import datetime
from django.urls import reverse
from workshops.models import Person, Award, Badge, TrainingProgress, \
TrainingRequirement
from workshops.tests.base import TestBase
class TestTraineeDashboard(TestBase):
"""Tests for trainee dashboard."""
def setUp(self):
self.user = Person.objects.create_user(
username='user', personal='', family='',
email='user@example.org', password='pass')
self.user.data_privacy_agreement = True
self.user.save()
self.client.login(username='user', password='pass')
def test_dashboard_loads(self):
rv = self.client.get(reverse('trainee-dashboard'))
self.assertEqual(rv.status_code, 200)
content = rv.content.decode('utf-8')
self.assertIn("Log out", content)
self.assertIn("Update your profile", content)
class TestInstructorStatus(TestBase):
"""Test that trainee dashboard displays information about awarded SWC/DC
Instructor badges."""
def setUp(self):
self._setUpUsersAndLogin()
self._setUpBadges()
self.progress_url = reverse('training-progress')
def test_swc_dc_lc_instructor_badges(self):
"""When the trainee is awarded both Carpentry Instructor badge,
we want to display that info in the dashboard."""
Award.objects.create(person=self.admin, badge=self.swc_instructor,
awarded=datetime(2016, 6, 1, 15, 0))
Award.objects.create(person=self.admin, badge=self.dc_instructor,
awarded=datetime(2016, 6, 1, 15, 0))
Award.objects.create(person=self.admin, badge=self.lc_instructor,
awarded=datetime(2018, 12, 25, 20, 16))
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Software Carpentry Instructor')
self.assertContains(rv, 'Data Carpentry Instructor')
self.assertContains(rv, 'Library Carpentry Instructor')
self.assertIn(self.swc_instructor,
rv.context['user'].instructor_badges)
self.assertIn(self.dc_instructor, rv.context['user'].instructor_badges)
self.assertIn(self.lc_instructor, rv.context['user'].instructor_badges)
def test_swc_instructor(self):
Award.objects.create(person=self.admin, badge=self.swc_instructor,
awarded=datetime(2016, 6, 1, 15, 0))
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Software Carpentry Instructor')
self.assertIn(self.swc_instructor,
rv.context['user'].instructor_badges)
def test_dc_instructor(self):
Award.objects.create(person=self.admin, badge=self.dc_instructor,
awarded=datetime(2016, 6, 1, 15, 0))
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Data Carpentry Instructor')
self.assertIn(self.dc_instructor, rv.context['user'].instructor_badges)
def test_lc_instructor(self):
Award.objects.create(person=self.admin, badge=self.lc_instructor,
awarded=datetime(2018, 12, 25, 20, 16))
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Library Carpentry Instructor')
self.assertIn(self.lc_instructor, rv.context['user'].instructor_badges)
def test_neither_swc_nor_dc_instructor(self):
"""Check that we don't display that the trainee is an instructor if
they don't have appropriate badge."""
rv = self.client.get(self.progress_url)
self.assertNotContains(rv, 'Congratulations, you\'re certified both '
'Software Carpentry and Data Carpentry '
'Instructor!')
self.assertNotContains(rv, 'Congratulations, you\'re certified '
'Software Carpentry Instructor!')
self.assertNotContains(rv, 'Congratulations, you\'re certified '
'Data Carpentry Instructor!')
def test_eligible_but_not_awarded(self):
"""Test what is dispslayed when a trainee is eligible to be certified
as an SWC/DC Instructor, but doesn't have appropriate badge awarded
yet."""
requirements = ['Training', 'SWC Homework', 'DC Homework',
'Discussion', 'SWC Demo', 'DC Demo']
for requirement in requirements:
TrainingProgress.objects.create(
trainee=self.admin,
requirement=TrainingRequirement.objects.get(name=requirement))
admin = Person.objects.annotate_with_instructor_eligibility() \
.get(username='admin')
self.assertEqual(admin.get_missing_instructor_requirements(), [])
rv = self.client.get(self.progress_url)
self.assertNotContains(rv, 'Congratulations, you\'re certified both '
'Software Carpentry and Data Carpentry '
'Instructor!')
self.assertNotContains(rv, 'Congratulations, you\'re certified '
'Software Carpentry Instructor!')
self.assertNotContains(rv, 'Congratulations, you\'re certified '
'Data Carpentry Instructor!')
class TestInstructorTrainingStatus(TestBase):
"""Test that trainee dashboard displays status of passing Instructor
Training."""
def setUp(self):
self._setUpUsersAndLogin()
self.training = TrainingRequirement.objects.get(name='Training')
self.progress_url = reverse('training-progress')
def test_training_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.training)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Training passed')
def test_training_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.training, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Training not passed yet')
def test_last_training_discarded_but_another_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.training)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.training, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Training passed')
def test_training_failed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.training, state='f')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Training not passed yet')
def test_training_not_finished(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Training not passed yet')
class TestSWCHomeworkStatus(TestBase):
"""Test that trainee dashboard displays status of passing SWC Homework.
Test that SWC homework submission form works."""
def setUp(self):
self._setUpUsersAndLogin()
self.homework = TrainingRequirement.objects.get(name='SWC Homework')
self.progress_url = reverse('training-progress')
def test_homework_not_submitted(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Homework not submitted yet')
def test_homework_waiting_to_be_evaluated(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, state='n')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Homework evaluation pending')
def test_homework_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Homework accepted')
def test_homework_not_accepted_when_homework_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Homework not submitted yet')
def test_homework_is_accepted_when_last_homework_is_discarded_but_other_one_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Homework accepted')
def test_submission_form(self):
data = {
'url': 'http://example.com',
'requirement': self.homework.pk,
}
rv = self.client.post(self.progress_url, data, follow=True)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.resolver_match.view_name, 'training-progress')
self.assertContains(rv, 'Your homework submission will be evaluated '
'soon.')
got = list(TrainingProgress.objects.values_list(
'state', 'trainee', 'url', 'requirement'))
expected = [(
'n',
self.admin.pk,
'http://example.com',
TrainingRequirement.objects.get(name='SWC Homework').pk,
)]
self.assertEqual(got, expected)
class TestDCHomeworkStatus(TestBase):
"""Test that trainee dashboard displays status of passing DC Homework.
Test that DC homework submission form works."""
def setUp(self):
self._setUpUsersAndLogin()
self.homework = TrainingRequirement.objects.get(name='DC Homework')
self.progress_url = reverse('training-progress')
def test_homework_not_submitted(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Homework not submitted yet')
def test_homework_waiting_to_be_evaluated(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, state='n')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Homework evaluation pending')
def test_homework_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Homework accepted')
def test_homework_not_accepted_when_homework_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Homework not submitted yet')
def test_homework_is_accepted_when_last_homework_is_discarded_but_other_one_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Homework accepted')
def test_submission_form(self):
data = {
'url': 'http://example.com',
'requirement': self.homework.pk,
}
rv = self.client.post(self.progress_url, data, follow=True)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.resolver_match.view_name, 'training-progress')
self.assertContains(rv, 'Your homework submission will be evaluated '
'soon.')
got = list(TrainingProgress.objects.values_list(
'state', 'trainee', 'url', 'requirement'))
expected = [(
'n',
self.admin.pk,
'http://example.com',
TrainingRequirement.objects.get(name='DC Homework').pk,
)]
self.assertEqual(got, expected)
class TestLCHomeworkStatus(TestBase):
"""Test that trainee dashboard displays status of passing LC Homework.
Test that LC homework submission form works."""
def setUp(self):
self._setUpUsersAndLogin()
self.homework = TrainingRequirement.objects.get(name='LC Homework')
self.progress_url = reverse('training-progress')
def test_homework_not_submitted(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Homework not submitted yet')
def test_homework_waiting_to_be_evaluated(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, state='n')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Homework evaluation pending')
def test_homework_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Homework accepted')
def test_homework_not_accepted_when_homework_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Homework not submitted yet')
def test_homework_is_accepted_when_last_homework_is_discarded_but_other_one_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.homework, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Homework accepted')
def test_submission_form(self):
data = {
'url': 'http://example.com',
'requirement': self.homework.pk,
}
rv = self.client.post(self.progress_url, data, follow=True)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.resolver_match.view_name, 'training-progress')
self.assertContains(rv, 'Your homework submission will be evaluated '
'soon.')
got = list(TrainingProgress.objects.values_list(
'state', 'trainee', 'url', 'requirement'))
expected = [(
'n',
self.admin.pk,
'http://example.com',
TrainingRequirement.objects.get(name='LC Homework').pk,
)]
self.assertEqual(got, expected)
class TestDiscussionSessionStatus(TestBase):
"""Test that trainee dashboard displays status of passing Discussion
Session. Test whether we display instructions for registering for a
session. """
def setUp(self):
self._setUpUsersAndLogin()
self.discussion = TrainingRequirement.objects.get(name='Discussion')
self.progress_url = reverse('training-progress')
def test_session_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.discussion)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Discussion Session passed')
def test_session_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.discussion, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Discussion Session not passed yet')
def test_last_session_discarded_but_another_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.discussion)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.discussion, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Discussion Session passed')
def test_session_failed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.discussion, state='f')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Discussion Session not passed yet')
def test_no_participation_in_a_session_yet(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'Discussion Session not passed yet')
class TestDemoSessionStatus(TestBase):
"""Test that trainee dashboard displays status of passing SWC/DC Demo
Session. Test whether we display instructions for registering for a
session."""
def setUp(self):
self._setUpUsersAndLogin()
self.swc_demo = TrainingRequirement.objects.get(name='SWC Demo')
self.dc_demo = TrainingRequirement.objects.get(name='DC Demo')
self.lc_demo = TrainingRequirement.objects.get(name='LC Demo')
self.progress_url = reverse('training-progress')
def test_swc_session_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_swc_session_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_swc_last_session_discarded_but_another_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_swc_session_failed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo, state='f')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_no_participation_in_a_swc_session_yet(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_dc_session_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_dc_session_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_dc_last_session_discarded_but_another_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_dc_session_failed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo, state='f')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_no_participation_in_a_dc_session_yet(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'DC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_lc_session_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_lc_session_passed_but_discarded(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_lc_last_session_discarded_but_another_is_passed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo, discarded=True)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Demo Session passed')
self.assertContains(rv, 'You can register for Demo Session on')
def test_lc_session_failed(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo, state='f')
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_no_participation_in_a_lc_session_yet(self):
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'LC Demo Session not passed yet')
self.assertContains(rv, 'You can register for Demo Session on')
def test_no_registration_instruction_when_trainee_passed_all_sessions(self):
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.swc_demo)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.dc_demo)
TrainingProgress.objects.create(
trainee=self.admin, requirement=self.lc_demo)
rv = self.client.get(self.progress_url)
self.assertContains(rv, 'SWC Demo Session passed')
self.assertContains(rv, 'DC Demo Session passed')
self.assertContains(rv, 'LC Demo Session passed')
self.assertNotContains(rv, 'You can register for Demo Session on')
|
|
#-*- coding: utf-8 -*-
import re
from django.contrib.contenttypes.models import ContentType
from django.template import (Library, Node, TemplateSyntaxError,
Variable, loader, RequestContext)
from django.utils.safestring import mark_safe
from django_comments_xtd import get_model as get_comment_model
from ..utils import import_formatter
XtdComment = get_comment_model()
formatter = import_formatter()
register = Library()
class XtdCommentCountNode(Node):
"""Store the number of XtdComments for the given list of app.models"""
def __init__(self, as_varname, content_types):
"""Class method to parse get_xtdcomment_list and return a Node."""
self.as_varname = as_varname
self.qs = XtdComment.objects.for_content_types(content_types)
def render(self, context):
context[self.as_varname] = self.qs.count()
return ''
def get_xtdcomment_count(parser, token):
"""
Gets the comment count for the given params and populates the template
context with a variable containing that value, whose name is defined by the
'as' clause.
Syntax::
{% get_xtdcomment_count as [varname] for [app].[model] [[app].[model]] %}
Example usage::
{% get_xtdcomment_count as comments_count for blog.story blog.quote %}
"""
tokens = token.contents.split()
if tokens[1] != 'as':
raise TemplateSyntaxError("Second argument in %r tag must be 'as'" % tokens[0])
as_varname = tokens[2]
if tokens[3] != 'for':
raise TemplateSyntaxError("Fourth argument in %r tag must be 'for'" % tokens[0])
content_types = _get_content_types(tokens[0], tokens[4:])
return XtdCommentCountNode(as_varname, content_types)
class BaseLastXtdCommentsNode(Node):
"""Base class to deal with the last N XtdComments for a list of app.model"""
def __init__(self, count, content_types, template_path=None):
"""Class method to parse get_xtdcomment_list and return a Node."""
try:
self.count = int(count)
except (TypeError, ValueError):
self.count = Variable(count)
self.content_types = content_types
self.template_path = template_path
class RenderLastXtdCommentsNode(BaseLastXtdCommentsNode):
def render(self, context):
if not isinstance(self.count, int):
self.count = int(self.count.resolve(context))
self.qs = XtdComment.objects.for_content_types(self.content_types)[:self.count]
strlist = []
for xtd_comment in self.qs:
if self.template_path:
template_arg = self.template_path
else:
template_arg = [
"django_comments_xtd/%s/%s/comment.html" % (
xtd_comment.content_type.app_label,
xtd_comment.content_type.model),
"django_comments_xtd/%s/comment.html" % (
xtd_comment.content_type.app_label,),
"django_comments_xtd/comment.html"
]
strlist.append(
loader.render_to_string(
template_arg, {"comment": xtd_comment}, context))
return ''.join(strlist)
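# Note the fallback order above: the most specific template wins. For a
# comment on blog.story the search is
# django_comments_xtd/blog/story/comment.html, then
# django_comments_xtd/blog/comment.html, then
# django_comments_xtd/comment.html.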
class GetLastXtdCommentsNode(BaseLastXtdCommentsNode):
def __init__(self, count, as_varname, content_types):
super(GetLastXtdCommentsNode, self).__init__(count, content_types)
self.as_varname = as_varname
def render(self, context):
if not isinstance(self.count, int):
self.count = int(self.count.resolve(context))
self.qs = XtdComment.objects.for_content_types(self.content_types)[:self.count]
context[self.as_varname] = self.qs
return ''
def _get_content_types(tagname, tokens):
content_types = []
for token in tokens:
try:
app, model = token.split('.')
content_types.append(
ContentType.objects.get(app_label=app, model=model))
except ValueError:
raise TemplateSyntaxError(
"Argument %s in %r must be in the format 'app.model'" % (
token, tagname))
except ContentType.DoesNotExist:
raise TemplateSyntaxError(
"%r tag has non-existant content-type: '%s.%s'" % (
tagname, app, model))
return content_types
def render_last_xtdcomments(parser, token):
"""
Render the last N XtdComments through the
``comments_xtd/comment.html`` template
Syntax::
{% render_last_xtdcomments [N] for [app].[model] [[app].[model]] using [template] %}
Example usage::
{% render_last_xtdcomments 5 for blog.story blog.quote using "comments/blog/comment.html" %}
"""
tokens = token.contents.split()
try:
count = tokens[1]
except IndexError:
raise TemplateSyntaxError(
"Second argument in %r tag must be an integer" % tokens[0])
if tokens[2] != 'for':
raise TemplateSyntaxError(
"Third argument in %r tag must be 'for'" % tokens[0])
try:
token_using = tokens.index("using")
content_types = _get_content_types(tokens[0], tokens[3:token_using])
try:
template = tokens[token_using+1].strip('" ')
except IndexError:
raise TemplateSyntaxError(
"Last argument in %r tag must be a relative template path" % tokens[0])
except ValueError:
content_types = _get_content_types(tokens[0], tokens[3:])
template = None
return RenderLastXtdCommentsNode(count, content_types, template)
def get_last_xtdcomments(parser, token):
"""
Get the last N XtdComments
Syntax::
{% get_last_xtdcomments [N] as [varname] for [app].[model] [[app].[model]] %}
Example usage::
{% get_last_xtdcomments 5 as last_comments for blog.story blog.quote %}
"""
tokens = token.contents.split()
try:
count = int(tokens[1])
except ValueError:
raise TemplateSyntaxError(
"Second argument in %r tag must be a integer" % tokens[0])
if tokens[2] != 'as':
raise TemplateSyntaxError(
"Third argument in %r tag must be 'as'" % tokens[0])
as_varname = tokens[3]
if tokens[4] != 'for':
raise TemplateSyntaxError(
"Fifth argument in %r tag must be 'for'" % tokens[0])
content_types = _get_content_types(tokens[0], tokens[5:])
return GetLastXtdCommentsNode(count, as_varname, content_types)
def render_markup_comment(value):
"""
Renders a comment using a markup language specified in the first line of the comment.
Template Syntax::
{{ comment.comment|render_markup_comment }}
The first line of the comment field must start with the name of the markup language.
A comment like::
comment = r'''#!markdown\n\rAn [example](http://url.com/ "Title")'''
Would be rendered as a markdown text, producing the output::
<p><a href="http://url.com/" title="Title">example</a></p>
"""
lines = value.splitlines()
rawstr = r"""^#!(?P<markup_filter>\w+)$"""
match_obj = re.search(rawstr, lines[0])
if match_obj:
markup_filter = match_obj.group('markup_filter')
try:
if formatter:
return mark_safe(formatter("\n".join(lines[1:]), filter_name=markup_filter))
else:
raise TemplateSyntaxError(
"In order to use this templatetag you need django-markup, docutils and markdown installed")
except ValueError as exc:
output = "<p>Warning: %s</p>" % exc
return output + value
else:
return value
register.tag(get_xtdcomment_count)
register.tag(render_last_xtdcomments)
register.tag(get_last_xtdcomments)
register.filter(render_markup_comment)
|
|
import unittest
import sys
from unittest.test.support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite()),
(unittest.TestSuite(), unittest.TestSuite([])),
(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1')),
(unittest.TestSuite([]), _mk_TestSuite('test_1')),
(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3')),
(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
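# 'fail' below is an undefined name on purpose: running this test
# raises NameError, which is recorded as an error (not a failure),
# matching the result.errors/result.failures assertions further down.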
fail
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
|
|
from StringIO import StringIO
import pickle
import sys
import gc
import copy
from os import path
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount, WarningManager
from numpy.compat import asbytes, asunicode, asbytes_nested
import warnings
import tempfile
import numpy as np
if sys.version_info[0] >= 3:
import io
StringIO = io.BytesIO
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]),np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,),dtype=[('x',np.int64)])
@dec.knownfailureif(sys.platform == 'cli',
"Pickling transposed arrays is not yet implemented")
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
f = StringIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a,b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64],'Int64')
assert_equal(np.typeNA[np.uint64],'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name','label'),np.int32,3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5,5,5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = StringIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5,3))
b = a[:,:2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError,rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j,4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3,2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False,True]))
self.assert_(a[1] == 'auto')
self.assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assert_(b != 'auto')
self.assert_(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError,np.dtype,
{'names':['a'],'formats':['foo']},align=1)
@dec.knownfailureif(sys.version_info[0] >= 3,
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
@dec.knownfailureif(sys.platform == 'cli',
"overflow not thrown when setting MSBit of signed int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width,16)
self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
self.assertRaises(ValueError,np.intp,'0x1',32)
assert_equal(255,np.intp('0xFF',16))
assert_equal(1024,np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10.,dtype='>f8')
b = np.arange(10.,dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa,ya.nonzero())
assert_array_almost_equal(xb,yb.nonzero())
assert(np.all(a[ya] > 0.5))
assert(np.all(b[yb] > 0.5))
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0,1)
y = np.random.randn(10,1)
z = np.dot(x, np.transpose(y))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10,dtype='<f8')
assert_array_equal(ref,x)
x = np.arange(10,dtype='>f8')
assert_array_equal(ref,x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0],10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3,3))
y = np.array([0,0,0])
self.assertRaises(ValueError,np.hstack,(x,y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert(type(a.squeeze()) is np.ndarray)
assert(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0,np.add.identity)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
"""
descr = np.dtype([('i',int),('f',float),('s','|S3')])
x = np.rec.array([(1,1.1,'1.0'),
(2,2.2,'2.0')],dtype=descr)
x[0].tolist()
[i for i in x[0]]
"""
print "Ticket #160 disabled - np.rec.array()"
pass
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello',np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3,3),order='F')[:,:2]
assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3,1))
x.flat = np.arange(3)
assert_array_almost_equal(x,[[0],[1],[2]])
x.flat = np.arange(3,dtype=float)
assert_array_almost_equal(x,[[0],[1],[2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3,1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3,dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
f = StringIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt,dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x',int),('y',np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1,'object'], dt)
# Correct way
np.array([(1,'object')],dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
"""
a = np.array([1,2,3],dtype=np.int32)
b = a.copy()
r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
assert_array_equal(a,b)
assert_equal(a,r[0][0])
"""
print "#202 disabled - np.rec.array()"
pass
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,),5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
assert_equal(np.array([],dtype=object).shape, (0,))
assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y),decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x","x ","x "))
for c in x: assert_equal(str(c),"x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1,2,3,4,5,6,7,8,9,10])
assert_equal(np.lexsort(v),0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1,'A',None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3,1),int)
a[[1,2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'),('two', '<i4')])
x = np.array((1,2), dtype=dt)
x = x.byteswap()
assert(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8,7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
assert abs(arr-res2).max() < 1e-8, func
else:
assert abs(res1-res2).max() < 1e-8, func
for func in funcs2:
arr1 = np.random.rand(8,7)
arr2 = np.random.rand(8,7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert abs(res1-res2).max() < 1e-8, func
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc','cde','fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1,2])[np.array([0])]
assert_equal(x.shape,(1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
"""
dt = [('x',np.int16),('y',np.float64)]
ra = np.array([(1,2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert ra['x'] != rb['x']
"""
print "Ticket #312 disabled - np.rec.array()"
pass
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1,2],[3,4],[5,6]])
x2 = np.array(['a','dd','xyz'])
x3 = np.array([1.1,2,3])
np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2,2),object)
x.flat[2] = (1,2,3)
assert_equal(x.flat[2],(1,2,3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1,2,3],dtype=np.float64)
assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data,data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert np.dtype('i4') == np.dtype(('i4',()))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array,[['X'],['X','X','X']],'|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2,3,order='F')
assert_equal(a,[[0,2,4],[1,3,5]])
a = np.array([[1,2],[3,4],[5,6],[7,8]])
b = a[:,1]
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1,2,3])
a2 = np.array([[1,2,3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1,2,3],dtype=object)
assert a.argmax() == 2
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
"""
dt0 = np.dtype([('f0','i4'),('f1','i4')])
dt1 = np.dtype([('f0','i8'),('f1','i8')])
for a in [np.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)]),
np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
np.rec.fromarrays([(1,2),(3,4)])]:
assert(a.dtype in [dt0,dt1])
"""
print "#372 disabled - np.rec.array()"
pass
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5,1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0),a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
if sys.platform != 'cli':
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi','There'])
assert_equal(ca.startswith('H'),[True,False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError,np.convolve,[],[1])
self.assertRaises(ValueError,np.convolve,[1],[])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1,(0,1,2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256,(0,256,512))],r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
@dec.skipif(sys.platform == 'cli',
"asbytes() doesn't work correctly on IronPython")
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert asbytes(r['var1'][0][0]) == asbytes('abc')
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3,4))
a = np.take(x,[0,2],axis=1)
b = np.zeros_like(a)
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
errstate = np.seterr(all='raise')
try:
sstr = np.array_str(s)
finally:
np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(range(16))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert a[a.argmax()] == a.max()
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5,dtype=float)
b = np.array(a,dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1,5,25,125.,625]])
y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x,z),np.dot(x,y2))
def test_object_casting(self, level=rlevel):
def rs():
x = np.ones([484,286])
y = np.zeros([484,286])
x |= y
self.assertRaises(TypeError,rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = cPickle.loads(cPickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4','<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0,dtype=dt).dtype,dt)
assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
assert_equal(np.arange(5,dtype=dt).dtype,dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10,dtype=float)
x = np.array((15,),dtype=float)
def ia(x,s): x[(s>0)]=1.0
self.assertRaises(ValueError,ia,x,s)
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0],dtype=float)
index = np.array(0,dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0,width=3),'000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
""" TODO: Fails on IronPython because 'double' and 'longdouble' are the
same size and formatting of the two is different in numpy. Not sure
how 32-bit versions are supposed to work. """
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
if sys.platform == 'cli':
assert_equal(str(np.abs(x)),'[0.0 0.0 0.0]')
else:
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount') or sys.platform == 'cli':
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1d broadcasted slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert cnt(a) == cnt0_a + 5
arr[:] = b
assert cnt(a) == cnt0_a
assert cnt(b) == cnt0_b + 5
arr[:2] = c
assert cnt(b) == cnt0_b + 3
assert cnt(c) == cnt0_c + 2
del arr
# -- 1d -> 2d broadcasted slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert cnt(a) == cnt0_a + 1
arr0[1] = b
assert cnt(b) == cnt0_b + 1
arr[:,:] = arr0
assert cnt(a) == cnt0_a + 6
assert cnt(b) == cnt0_b + 6
arr[:,0] = None
assert cnt(a) == cnt0_a + 1
del arr, arr0
# -- 2d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:,0] = a
arr[:,1] = b
assert cnt(a) == cnt0_a + 5
assert cnt(b) == cnt0_b + 5
arr2 = arr.copy()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 10
arr2 = arr[:,0].copy()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 5
arr2 = arr.flatten()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 10
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert cnt(a) == cnt0_a + 5
assert cnt(b) == cnt0_b + 5
arr3 = np.concatenate((arr1, arr2))
assert cnt(a) == cnt0_a + 5 + 5
assert cnt(b) == cnt0_b + 5 + 5
arr3 = arr1.repeat(3, axis=0)
assert cnt(a) == cnt0_a + 5 + 3*5
arr3 = arr1.take([1,2,3], axis=0)
assert cnt(a) == cnt0_a + 5 + 3
x = np.array([[0],[1],[0],[1],[1]], int)
arr3 = x.choose(arr1, arr2)
assert cnt(a) == cnt0_a + 5 + 2
assert cnt(b) == cnt0_b + 5 + 3
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat:
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
if sys.platform == 'cli':
return
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert not arr[0].deleted
arr[:] = arr # trying to induce a segfault by doing it again...
assert not arr[0].deleted
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1,2,3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert not np.any(a)
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_complex_dtype_printing(self, level=rlevel):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30,40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
xp = pickle.load(open(filename, 'rb'), encoding='latin1')
else:
xp = pickle.load(open(filename))
xpd = xp.astype(np.float64)
assert (xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0])
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
import numpy as np
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert dat.info == 'jubba'
dat.resize((4,2))
assert dat.info == 'jubba'
dat.sort()
assert dat.info == 'jubba'
dat.fill(2)
assert dat.info == 'jubba'
dat.put([2,3,4],[6,3,4])
assert dat.info == 'jubba'
dat.setfield(4, np.int32,0)
assert dat.info == 'jubba'
dat.setflags()
assert dat.info == 'jubba'
assert dat.all(1).info == 'jubba'
assert dat.any(1).info == 'jubba'
assert dat.argmax(1).info == 'jubba'
assert dat.argmin(1).info == 'jubba'
assert dat.argsort(1).info == 'jubba'
assert dat.astype(TestArray).info == 'jubba'
assert dat.byteswap().info == 'jubba'
assert dat.clip(2,7).info == 'jubba'
assert dat.compress([0,1,1]).info == 'jubba'
assert dat.conj().info == 'jubba'
assert dat.conjugate().info == 'jubba'
assert dat.copy().info == 'jubba'
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert dat2.choose(choices).info == 'jubba'
assert dat.cumprod(1).info == 'jubba'
assert dat.cumsum(1).info == 'jubba'
assert dat.diagonal().info == 'jubba'
assert dat.flatten().info == 'jubba'
assert dat.getfield(np.int32,0).info == 'jubba'
assert dat.imag.info == 'jubba'
assert dat.max(1).info == 'jubba'
assert dat.mean(1).info == 'jubba'
assert dat.min(1).info == 'jubba'
assert dat.newbyteorder().info == 'jubba'
assert dat.nonzero()[0].info == 'jubba'
assert dat.nonzero()[1].info == 'jubba'
assert dat.prod(1).info == 'jubba'
assert dat.ptp(1).info == 'jubba'
assert dat.ravel().info == 'jubba'
assert dat.real.info == 'jubba'
assert dat.repeat(2).info == 'jubba'
assert dat.reshape((2,4)).info == 'jubba'
assert dat.round().info == 'jubba'
assert dat.squeeze().info == 'jubba'
assert dat.std(1).info == 'jubba'
assert dat.sum(1).info == 'jubba'
assert dat.swapaxes(0,1).info == 'jubba'
assert dat.take([2,3,5]).info == 'jubba'
assert dat.transpose().info == 'jubba'
assert dat.T.info == 'jubba'
assert dat.var(1).info == 'jubba'
assert dat.view(TestArray).info == 'jubba'
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
"""
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert( a[0].tolist() == b[0])
assert( a[1].tolist() == b[1])
"""
print "#793 disabled - np.recarray()"
pass
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3 or sys.platform == 'cli':
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert a.dtype.names[0] == "notfoo"
assert a.dtype.names[1] == "bar"
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert a is None
assert type(b) is int
assert type(b2) is float
assert type(c) is np.ndarray
assert c.dtype == object
assert d.dtype == object
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
if sys.platform != 'cli':
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert n_before >= n_after, (n_before, n_after)
finally:
np.seterr(**old_err)
else:
print "Skipped test 955: gc.get_objects() is not implemented on this platform."
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert arr[0][0] == 'john'
assert arr[0][1] == 4
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert np.all(z == 0)
assert z.shape == (m, n)
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.ones(2)
b = np.array(3)
assert_raises(ValueError, lambda: np.array((a, b)))
t = ((1,), np.array(1))
assert_raises(ValueError, lambda: np.array(t))
@dec.knownfailureif(True, "This is a corner case, see ticket #1081.")
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_raises(ValueError, lambda: np.array(t))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [2**10]*10)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert a.itemsize == 16
a = np.array([u'123', '1234'])
assert a.itemsize == 16
a = np.array(['1234', u'123', '12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'1234'])
assert a.itemsize == 16
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert x[0,1] == x[0,0]
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
# Only makes sense for reference counted platforms like CPython
if not hasattr(sys, 'getrefcount') or sys.platform == 'cli':
return
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert sys.getrefcount(strb) == numb
assert sys.getrefcount(stra) == numa + 2
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min /= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
@dec.skipif(sys.platform == 'cli',
"Requires buffer support for md5 to access internal representation - "
"not currently supported by numpy or md5 module.")
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timeinteger))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
if sys.platform == 'cli':
# Not applicable to IronPython
return
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
if sys.platform == 'cli':
import System.GC
def cnt(a):
System.GC.Collect()
return a.__coreRefCount__
else:
            def cnt(a): return (sys.getrefcount(a), a.__coreRefCount__)
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = cnt(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = cnt(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f = tempfile.TemporaryFile()
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
@dec.knownfailureif(sys.platform == 'cli',
"Warnings are emited but assert_warns doesn't detect it")
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
ctx = WarningManager()
ctx.__enter__()
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
ctx.__exit__()
@dec.knownfailureif(sys.platform == 'cli',
"complex constructor fails even though implicit conversion to string"
"of scalar complex values exists/works")
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
if __name__ == "__main__":
run_module_suite()
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: pipes.py
"""Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the function test() at the end of the file.
"""
import re
import os
import tempfile
import string
__all__ = [
'Template']
FILEIN_FILEOUT = 'ff'
STDIN_FILEOUT = '-f'
FILEIN_STDOUT = 'f-'
STDIN_STDOUT = '--'
SOURCE = '.-'
SINK = '-.'
stepkinds = [
FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT,
SOURCE, SINK]
class Template:
"""Class representing a pipeline template."""
def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0
self.reset()
def __repr__(self):
"""t.__repr__() implements repr(t)."""
return '<Template instance, steps=%r>' % (self.steps,)
def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = []
def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template()
t.steps = self.steps[:]
t.debugging = self.debugging
return t
def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag
def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) is not type(''):
raise TypeError, 'Template.append: cmd must be a string'
if kind not in stepkinds:
raise ValueError, 'Template.append: bad kind %r' % (kind,)
if kind == SOURCE:
raise ValueError, 'Template.append: SOURCE can only be prepended'
if self.steps and self.steps[-1][1] == SINK:
raise ValueError, 'Template.append: already ends with SINK'
if kind[0] == 'f' and not re.search('\\$IN\\b', cmd):
raise ValueError, 'Template.append: missing $IN in cmd'
if kind[1] == 'f' and not re.search('\\$OUT\\b', cmd):
raise ValueError, 'Template.append: missing $OUT in cmd'
self.steps.append((cmd, kind))
def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) is not type(''):
raise TypeError, 'Template.prepend: cmd must be a string'
if kind not in stepkinds:
raise ValueError, 'Template.prepend: bad kind %r' % (kind,)
if kind == SINK:
raise ValueError, 'Template.prepend: SINK can only be appended'
if self.steps and self.steps[0][1] == SOURCE:
raise ValueError, 'Template.prepend: already begins with SOURCE'
if kind[0] == 'f' and not re.search('\\$IN\\b', cmd):
raise ValueError, 'Template.prepend: missing $IN in cmd'
if kind[1] == 'f' and not re.search('\\$OUT\\b', cmd):
raise ValueError, 'Template.prepend: missing $OUT in cmd'
self.steps.insert(0, (cmd, kind))
def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r':
return self.open_r(file)
if rw == 'w':
return self.open_w(file)
raise ValueError, "Template.open: rw must be 'r' or 'w', not %r" % (rw,)
def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if not self.steps:
return open(file, 'r')
if self.steps[-1][1] == SINK:
            raise ValueError, 'Template.open_r: pipeline ends with SINK'
cmd = self.makepipeline(file, '')
return os.popen(cmd, 'r')
def open_w(self, file):
if not self.steps:
return open(file, 'w')
if self.steps[0][1] == SOURCE:
raise ValueError, 'Template.open_w: pipeline begins with SOURCE'
cmd = self.makepipeline('', file)
return os.popen(cmd, 'w')
def copy(self, infile, outfile):
return os.system(self.makepipeline(infile, outfile))
def makepipeline(self, infile, outfile):
cmd = makepipeline(infile, self.steps, outfile)
if self.debugging:
print cmd
cmd = 'set -x; ' + cmd
return cmd
def makepipeline(infile, steps, outfile):
list = []
for cmd, kind in steps:
list.append(['', cmd, kind, ''])
if not list:
list.append(['', 'cat', '--', ''])
cmd, kind = list[0][1:3]
if kind[0] == 'f' and not infile:
list.insert(0, ['', 'cat', '--', ''])
list[0][0] = infile
cmd, kind = list[-1][1:3]
if kind[1] == 'f' and not outfile:
list.append(['', 'cat', '--', ''])
list[-1][-1] = outfile
garbage = []
for i in range(1, len(list)):
lkind = list[i - 1][2]
rkind = list[i][2]
if lkind[1] == 'f' or rkind[0] == 'f':
fd, temp = tempfile.mkstemp()
os.close(fd)
garbage.append(temp)
list[i - 1][-1] = list[i][0] = temp
for item in list:
inf, cmd, kind, outf = item
if kind[1] == 'f':
cmd = 'OUT=' + quote(outf) + '; ' + cmd
if kind[0] == 'f':
cmd = 'IN=' + quote(inf) + '; ' + cmd
if kind[0] == '-' and inf:
cmd = cmd + ' <' + quote(inf)
if kind[1] == '-' and outf:
cmd = cmd + ' >' + quote(outf)
item[1] = cmd
cmdlist = list[0][1]
for item in list[1:]:
cmd, kind = item[1:3]
if item[0] == '':
if 'f' in kind:
cmd = '{ ' + cmd + '; }'
cmdlist = cmdlist + ' |\n' + cmd
else:
cmdlist = cmdlist + '\n' + cmd
if garbage:
rmcmd = 'rm -f'
for file in garbage:
rmcmd = rmcmd + ' ' + quote(file)
trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
return cmdlist
_safechars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
def quote(file):
"""Return a shell-escaped version of the file string."""
for c in file:
if c not in _safechars:
break
else:
if not file:
return "''"
return file
return "'" + file.replace("'", '\'"\'"\'') + "'"
|
|
########################################################################
# Threes! is a game by Asher Vollmer, Greg Wohlwend, Jimmy Hinson,    #
# and Hidden Variable. This is created so that AI/ML strategies for #
# the game can be developed and tested easier, and is not intended as #
# a replacement of the original game. Please support the developers by #
# purchasing their excellent game! #
########################################################################
""" This is the Threes Board class.
The board creation, game logic and record keeping are all in this file.
For testing AI strategy with Threes, you probably only need this.
"""
###########
# Imports #
###########
from __future__ import print_function
from random import random, randint
from math import ceil
from TileDeck import TileDeck
from copy import deepcopy
##############
# Exceptions #
##############
# Raised if the board did not change after a swipe
class NoMovementError(Exception):
    pass

# Raised when trying to place more tiles on the board than there are
# spaces during the initialization of the board
class TooManyTilesError(Exception):
    pass

# Raised when trying to move in a direction that is not allowed
class InvalidMoveError(Exception):
    pass
###########################################
# Helper functions for creating the board #
###########################################
def _create_board(size=4):
"""Generating the empty starting game board
This board can be of any (nxn) size.
0 = a space with no tile
"""
return [[0 for j in range(size)] for i in range(size)]
def _populate_board(board, deck, nTiles):
    """Put the starting tiles on the board.

    board - a list of lists filled with 0's, as built by _create_board above
    deck - Threes! does not use uniformly random tiles, see TileDeck.py
    nTiles - determines how many tiles are used to populate the board

    The (partially consumed) deck is returned along with the board so the
    caller can keep drawing the remaining tiles from it during the game.
    """
size = len(board)
# nTiles is one factor that I want to experiment with. This is a
# basic sanity check. You can't place more tiles on the board than
# there are spaces
if nTiles > size**2:
# nTiles = size**2 #Silently resolve error
raise TooManyTilesError
for i in range(nTiles):
tile = deck.get_next_tile()
# Place tiles randomly on the board
while True:
pos = int(ceil(size**2 * random())) - 1
x = pos // size
y = pos % size
if board[x][y] == 0:
board[x][y] = tile
break
return board, deck
####################################
# Helper function for board swipes #
####################################
def _shift_left(row):
"""Performs what happen at row level when you swipe left in Threes
Adding next tile does not happen at this level.
This is the fundamental operation of the board. All other behaviors
are based on this one.
"""
for i in range(1, len(row)):
# Move tile left if the left space is empty
if row[i-1] == 0:
row[i-1], row[i] = row[i], row[i-1]
# Merge left, if the two tiles are the same, and divisible by 3
elif row[i-1] == row[i] and row[i] % 3 == 0:
row[i-1] *= 2
row[i] = 0
# Merge left, if two tiles adds up to 3
elif row[i-1] + row[i] == 3:
row[i-1] = 3
row[i] = 0
return row
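# Worked examples of a single row-level swipe (each tile moves at most one
# step per swipe, as in Threes!):
#   _shift_left([3, 3, 1, 2]) -> [6, 1, 2, 0]  # the 3s merge, the rest slide
#   _shift_left([1, 2, 3, 3]) -> [3, 3, 3, 0]  # 1+2 merge into a 3; 3s slide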
def _swipe_left(board, tile=0):
    """Perform what happens at board level when you swipe left.

    Adds the next tile to a row that changed; tile=0 (the default)
    means no tile is added.
    """
copy_board = deepcopy(board)
for row in copy_board:
row = _shift_left(row)
# If the board did not change, then it's not a legal move
if copy_board == board:
return board
else:
# Add next tile on a row that changed
while True:
pick = randint(0, len(board) - 1)
if board[pick] != copy_board[pick]:
copy_board[pick][-1] = tile
break
return copy_board
def _swipe_right(board, tile=0):
"""Perform what happens at board level when you swipe right
Based on _swipe_left
"""
return _reverse(_swipe_left(_reverse(board), tile))
def _swipe_up(board, tile=0):
"""Perform what happens at board level when you swipe up
Based on _swipe_left
"""
return _row2col(_swipe_left(_row2col(board), tile))
def _swipe_down(board, tile=0):
"""Perform what happens at board level when you swipe down
Based on _swipe_left
"""
return _row2col(_swipe_right(_row2col(board), tile))
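# Example of the composed swipes above: a right swipe is a left swipe on the
# mirrored board, mirrored back, so each tile moves one step toward the
# right edge per swipe:
#   _swipe_right([[1, 2, 0, 0]]) -> [[0, 1, 2, 0]]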
# I should move _reverse and _row2col below _swipe_left
def _reverse(board):
"""Reverse the board right and left"""
for row in board:
row.reverse()
return board
def _row2col(board):
"""Reflect across the "y=x" diagonal"""
size = len(board)
for x in range(size):
for y in range(x):
board[y][x], board[x][y] = board[x][y], board[y][x]
return board
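# e.g. _row2col([[1, 2], [3, 4]]) returns [[1, 3], [2, 4]] (in-place transpose)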
########################
# Helper Function MISC #
########################
def _get_highest(board):
"""Return highest tile on the board, the board is a list of list
Since the board unordered, you have to go through every element.
"""
highest_tile = 3 # highest tile at the beginning of a game
for row in board:
for e in row:
if e > highest_tile:
highest_tile = e
return highest_tile
######################
# Threes Board Class #
######################
class ThreesBoard(object):
"""Captures everything about the game of Threes!
It holds the board, tiles, and state of the game.
It defines what are the allowable changes to the state.
"""
def __init__(
self,
size=4, # standard Threes board size, do more with it later
nTiles=9, # standard Threes number of starting tiles
board=None, # None or previous board
            deck=None,        # previous deck; None means a fresh TileDeck
                              # (avoids a shared mutable default argument)
history=None, # None, or previous history
nextTile=0): # no tile, or previous tile
"""Creating the Threes board
If passing in an old board position, that game will be recreated
The tile deck will can also be recreated
"""
if board:
"""To consider: store all information in history
eliminate the need for old boards, decks ...
initialize a previous game in form of
board = history[1]
nextTile = history[2]
... etc
For next major version?
"""
# Passing in the old game; size, ntiles are all ignored
self.board = board
            self.deck = TileDeck(deck.deck) if deck is not None else TileDeck()
self.history = history
self.highestTile = _get_highest(self.board)
# If old game information was incomplete
if nextTile == 0:
self.nextTile = self.deck.get_next_tile(0)
else:
self.nextTile = nextTile
else:
# Starting a new game; ignore previous history ... etc
self.board = _create_board(size)
self.deck = TileDeck()
# Populating a new board with start up tiles
self.board, self.deck = _populate_board(self.board,
self.deck,
nTiles)
self.nextTile = self.deck.get_next_tile()
# Set up empty history, then set initial condition
self.history = []
            # history format is: (move, resulting board, next tile)
self.history.append(('start', self.board, self.nextTile))
self.highestTile = 3
def swipe(self, move):
"""Same function for different swipes
This will make recording keeping easier
"""
direction = {'left': _swipe_left,
'right': _swipe_right,
'up': _swipe_up,
'down': _swipe_down}
copy_board = deepcopy(self.board)
try:
copy_board = direction[move](copy_board, self.nextTile)
except KeyError:
raise InvalidMoveError
if self.board == copy_board:
# raise NoMovementError
pass
else:
self.board = copy_board
self.highestTile = _get_highest(self.board)
self.nextTile = self.deck.get_next_tile(self.highestTile)
self.history.append((move, self.board, self.nextTile))
    def get_valid_moves(self):
        # Each check needs its own copy: _swipe_right, _swipe_up and
        # _swipe_down mirror/transpose their argument in place (via
        # _reverse/_row2col), so a legal move would leave a shared copy
        # corrupted for the checks that follow.
        moves = []
        if _swipe_left(deepcopy(self.board)) != self.board:
            moves.append("left")
        if _swipe_right(deepcopy(self.board)) != self.board:
            moves.append("right")
        if _swipe_up(deepcopy(self.board)) != self.board:
            moves.append("up")
        if _swipe_down(deepcopy(self.board)) != self.board:
            moves.append("down")
        return moves
def gameOver(self):
new_board = deepcopy(self.board)
return (_swipe_left(new_board) == self.board and
_swipe_right(new_board) == self.board and
_swipe_up(new_board) == self.board and
_swipe_down(new_board) == self.board)
def __eq__(self, other):
        # Consider adding a check on history as well
return (self.board == other.board and
self.deck == other.deck and
self.nextTile == other.nextTile)
if __name__ == "__main__":
# Consider using a Python builtin testing framework
# Testing on generating correct instances of games
# test_board = [[1, 2, 3],
# [2, 3, 1],
# [3, 1, 2]]
# test_deck = TileDeck([1,2,3])
# test_history = ['start', test_board, 1]
# game1 = ThreesBoard()
# assert len(game1.board) == 4
# game2 = ThreesBoard(5)
# assert len(game2.board) == 5
# game3 = ThreesBoard(5, 25)
# assert len(game3.board) == 5
# for row in game3.board:
# assert 0 not in row
# assert len(game3.deck.deck) == 22
# game4 = ThreesBoard(5, 25, test_board)
# assert len(game4.board) == 3
# game5 = ThreesBoard(4, 9, [], TileDeck([1,2,3]), test_history)
# assert len(game5.deck.deck) == 14
# game6 = ThreesBoard(4, 9, test_history[1],
# TileDeck([1,2,3], test_history))
# assert len(game6.board) == 3
# Automated Play Test with Random Strategy
from random import choice
a = ThreesBoard()
while not a.gameOver():
moves = {'w': 'up',
'a': 'left',
's': 'down',
'd': 'right'}
try:
move = choice(list(moves.keys()))
a.swipe(moves[move])
except NoMovementError:
pass
print("\n The next tile is :", a.nextTile, '\n')
for row in a.board:
for tile in row:
print(str(tile).center(6), end = ' ')
print('')
print("\n This is the end board after using a random strategy.")
print("\n The highest tile obtained is " + str(a.highestTile) + \
", after playing " + str(len(a.history)) + " moves.")
# Consider better ways to report history
# for e in a.history:
# print(e)
|
|
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import collections
import io
import struct
from binascii import hexlify
from enum import IntEnum
class CoapMessageType(IntEnum):
CON = 0 # Confirmable
NON = 1 # Non-confirmable
ACK = 2 # Acknowledgement
RST = 3 # Reset
class CoapOptionsTypes(IntEnum):
IF_MATCH = 1
URI_HOST = 3
ETAG = 4
IF_NOT_MATCH = 5
URI_PORT = 7
LOCATION_PATH = 8
URI_PATH = 11
CONTENT_FORMAT = 12
MAX_AGE = 14
URI_QUERY = 15
ACCEPT = 17
LOCATION_QUERY = 20
PROXY_URI = 35
PROXY_SCHEME = 39
SIZE1 = 60
class CoapOptionHeader(object):
""" Class representing CoAP optiona header. """
def __init__(self, delta, length):
self._delta = delta
self._length = length
@property
def delta(self):
return self._delta
@property
def length(self):
return self._length
@property
def is_payload_marker(self):
return self.delta == 0xf and self.length == 0xf
@classmethod
def _read_extended_value(cls, data, value):
if value == 13:
return ord(data.read(1)) + 13
        elif value == 14:
            # 14 means a 16-bit big-endian extended value, offset by 269
            return struct.unpack(">H", data.read(2))[0] + 269
else:
return value
@classmethod
def from_bytes(cls, data):
initial_byte = ord(data.read(1))
delta = (initial_byte >> 4) & 0xf
length = initial_byte & 0xf
delta = cls._read_extended_value(data, delta)
length = cls._read_extended_value(data, length)
return cls(delta, length)
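# Hedged example of the RFC 7252 extended encoding handled above: a header
# byte of 0xD1 carries delta nibble 13 (one extended byte follows) and
# length 1, so with an extended byte of 0x02 the real delta is 2 + 13 = 15.
#
#     header = CoapOptionHeader.from_bytes(io.BytesIO(b"\xd1\x02"))
#     assert (header.delta, header.length) == (15, 1)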
class CoapOption(object):
""" Class representing CoAP option. """
def __init__(self, _type, value):
self._type = _type
self._value = value
@property
def type(self):
return self._type
@property
def value(self):
return self._value
def __repr__(self):
return "CoapOption(type={}, value={})".format(self.type, hexlify(self.value))
class CoapOptionsFactory(object):
""" Factory that produces CoAP options. """
def parse(self, data, message_info):
options = []
_type = 0
while data.tell() < len(data.getvalue()):
option_header = CoapOptionHeader.from_bytes(data)
if option_header.is_payload_marker:
break
_type += option_header.delta
value = data.read(option_header.length)
option = CoapOption(_type, value)
options.append(option)
return options
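# Note: option numbers are delta-encoded on the wire (RFC 7252), which is why
# ``parse`` accumulates ``_type`` across options. For example, two consecutive
# URI_PATH options (number 11) are encoded with deltas 11 and 0.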
class CoapCode(object):
""" Class representing CoAP code. """
def __init__(self, code):
self._code = code
@property
def code(self):
return self._code
@property
def _class(self):
return (self.code >> 5) & 0x7
@property
def detail(self):
return self.code & 0x1f
@classmethod
def from_class_and_detail(cls, _class, detail):
return cls(((_class & 0x7) << 5) | (detail & 0x1f))
@classmethod
def from_dotted(cls, dotted_str):
_class, detail = dotted_str.split(".")
return cls.from_class_and_detail(int(_class), int(detail))
def is_equal_dotted(self, dotted_code):
other = self.from_dotted(dotted_code)
return self.code == other.code
@property
def dotted(self):
return ".".join(["{:01d}".format(self._class), "{:02d}".format(self.detail)])
def __eq__(self, other):
if isinstance(other, int):
return self.code == other
elif isinstance(other, str):
return self.is_equal_dotted(other)
elif isinstance(other, self.__class__):
return self.code == other.code
else:
raise TypeError("Could not compare {} and {}".format(type(self), type(other)))
def __repr__(self):
return self.dotted
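# Hedged example of the dotted code notation: 2.05 ("Content") packs class 2
# into the top three bits and detail 5 into the low five bits.
#
#     code = CoapCode.from_dotted("2.05")
#     assert code.code == 0x45 and code == "2.05"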
class CoapMessage(object):
""" Class representing CoAP message. """
def __init__(self, version, _type, code, message_id, token, options, payload, uri_path=None):
self._version = version
self._type = _type
self._code = code
self._message_id = message_id
self._token = token
self._options = options
self._payload = payload
self._uri_path = uri_path
@property
def version(self):
return self._version
@property
def type(self):
return self._type
@property
def code(self):
return self._code
@property
def message_id(self):
return self._message_id
@property
def token(self):
return self._token
@property
def tkl(self):
return len(self._token)
@property
def options(self):
return self._options
@property
def payload(self):
return self._payload
@property
def uri_path(self):
return self._uri_path
def __repr__(self):
options_str = ", ".join([repr(opt) for opt in self.options])
return "CoapMessage(version={}, type={}, code={}, message_id={}, token={}, options=[{}], payload={}, uri-path='{}')".format(
            self.version, CoapMessageType(self.type).name, self.code, self.message_id, hexlify(self.token),
options_str, self.payload, self.uri_path)
class CoapMessageProxy(object):
""" Proxy class of CoAP message.
    The main idea behind this class is to delay payload parsing. Due to the architecture of the existing
    solution, a confirmation message may be processed before its request message; in that case the URI path
    needed to select the proper payload parser is not yet known.
"""
def __init__(self, coap_message, message_info, mid_to_uri_path_binder, uri_path_based_payload_factories):
self._coap_message = coap_message
self._message_info = message_info
self._mid_to_uri_path_binder = mid_to_uri_path_binder
self._uri_path_based_payload_factories = uri_path_based_payload_factories
@property
def version(self):
return self._coap_message.version
@property
def type(self):
return self._coap_message.type
@property
def code(self):
return self._coap_message.code
@property
def message_id(self):
return self._coap_message.message_id
@property
def token(self):
return self._coap_message.token
@property
def tkl(self):
return self._coap_message.tkl
@property
def options(self):
return self._coap_message.options
@property
def payload(self):
try:
            bound_uri_path = self._mid_to_uri_path_binder.get_uri_path_for(self.message_id, self.token)
            factory = self._uri_path_based_payload_factories[bound_uri_path]
return factory.parse(io.BytesIO(self._coap_message.payload), self._message_info)
except RuntimeError:
return self._coap_message.payload
@property
def uri_path(self):
return self._coap_message.uri_path
def __repr__(self):
options_str = ", ".join([repr(opt) for opt in self.options])
return "CoapMessageProxy(version={}, type={}, code={}, message_id={}, token={}, options=[{}], payload={}, uri-path='{}')".format(
self.version, self.type, self.code, self.message_id, hexlify(self.token),
options_str, self.payload, self.uri_path)
class CoapMessageIdToUriPathBinder:
""" Class binds message id and token with URI path. """
def __init__(self):
self._uri_path_binds = collections.defaultdict(collections.defaultdict)
def add_uri_path_for(self, message_id, token, uri_path):
self._uri_path_binds[message_id][hexlify(token)] = uri_path
def get_uri_path_for(self, message_id, token):
try:
return self._uri_path_binds[message_id][hexlify(token)]
except KeyError:
raise RuntimeError("Could not find URI PATH for message_id: {} and token: {}".format(
message_id, hexlify(token)))
class CoapMessageFactory(object):
""" Factory that produces CoAP messages. """
def __init__(self, options_factory, uri_path_based_payload_factories, message_id_to_uri_path_binder):
self._options_factory = options_factory
self._uri_path_based_payload_factories = uri_path_based_payload_factories
self._mid_to_uri_path_binder = message_id_to_uri_path_binder
def _uri_path_from(self, options):
uri_path_options = []
for option in options:
if option.type == CoapOptionsTypes.URI_PATH:
uri_path_options.append(option.value.decode("utf-8"))
if not uri_path_options:
return None
return "/" + "/".join(uri_path_options)
def _parse_initial_byte(self, data, message_info):
initial_byte = ord(data.read(1))
version = (initial_byte >> 6) & 0x3
_type = CoapMessageType((initial_byte >> 4) & 0x3)
token_length = initial_byte & 0xf
return version, _type, token_length
def parse(self, data, message_info):
version, _type, token_length = self._parse_initial_byte(data, message_info)
code = CoapCode(ord(data.read(1)))
message_id = struct.unpack(">H", data.read(2))[0]
token = data.read(token_length)
options = self._options_factory.parse(data, message_info)
uri_path = self._uri_path_from(options)
if uri_path is not None:
self._mid_to_uri_path_binder.add_uri_path_for(message_id, token, uri_path)
coap_message = CoapMessage(version, _type, code, message_id, token, options, data.read(), uri_path)
return CoapMessageProxy(coap_message, message_info, self._mid_to_uri_path_binder, self._uri_path_based_payload_factories)
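# Minimal usage sketch (hedged; ``message_info`` is unused by these parsers):
#
#     factory = CoapMessageFactory(CoapOptionsFactory(), {},
#                                  CoapMessageIdToUriPathBinder())
#     # 0x40 = version 1, type CON, TKL 0; code 0.01 (GET); message id 0x1234
#     msg = factory.parse(io.BytesIO(b"\x40\x01\x12\x34"), message_info=None)
#     assert (msg.version, msg.type, msg.message_id) == (1, CoapMessageType.CON, 0x1234)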
|
|
#
# Kivy - Crossplatform NUI toolkit
# http://kivy.org/
#
import sys
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists
from os import walk, environ
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
PY3 = sys.version_info >= (3,)
def getoutput(cmd):
    import subprocess
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    out = p.communicate()[0]
    # decode so pkgconfig() can match str tokens under Python 3
    return out.decode('utf-8') if PY3 else out
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
for token in getoutput(cmd).split():
flag = flag_map.get(token[:2])
if not flag:
continue
        kw.setdefault(flag, []).append(token[2:])
return kw
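# Hedged sketch of what pkgconfig() yields when a package is installed;
# e.g. pkgconfig('gstreamer-1.0') might return something like
#     {'include_dirs': ['/usr/include/gstreamer-1.0', ...],
#      'libraries': ['gstreamer-1.0', 'gobject-2.0', 'glib-2.0']}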
# -----------------------------------------------------------------------------
# Determine on which platform we are
platform = sys.platform
# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
if sys.maxsize > 2 ** 32:
osx_arch = 'x86_64'
else:
osx_arch = 'i386'
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
if exists('/opt/vc/include/bcm_host.h'):
platform = 'rpi'
# -----------------------------------------------------------------------------
# Detect options
#
c_options = {
'use_rpi': platform == 'rpi',
'use_opengl_es2': True,
'use_opengl_debug': False,
'use_glew': False,
'use_sdl': False,
'use_ios': False,
'use_mesagl': False,
'use_x11': False,
'use_gstreamer': False,
'use_avfoundation': platform == 'darwin'}
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
value = bool(int(environ[ukey]))
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
# -----------------------------------------------------------------------------
# Cython check
# on python-for-android and kivy-ios, cython usage is external
have_cython = False
if platform in ('ios', 'android'):
    print('\nCython check skipped: Cython is used externally.')
else:
try:
# check for cython
from Cython.Distutils import build_ext
have_cython = True
except ImportError:
        print('\nCython is missing; it is required for compiling Kivy!\n\n')
raise
if not have_cython:
from distutils.command.build_ext import build_ext
# -----------------------------------------------------------------------------
# Setup classes
class KivyBuildExt(build_ext):
def build_extensions(self):
print('Build configuration is:')
for opt, value in c_options.items():
print(' * {0} = {1}'.format(opt, value))
print('Generate config.h')
config_h = join(dirname(__file__), 'kivy', 'graphics', 'config.h')
with open(config_h, 'w') as fd:
fd.write('// Autogenerated file for Kivy C configuration\n')
for k, v in c_options.items():
fd.write('#define __{0} {1}\n'.format(k.upper(), int(v)))
print('Generate config.pxi')
config_pxi = join(dirname(__file__), 'kivy', 'graphics', 'config.pxi')
with open(config_pxi, 'w') as fd:
fd.write('# Autogenerated file for Kivy Cython configuration\n')
for k, v in c_options.items():
fd.write('DEF {0} = {1}\n'.format(k.upper(), int(v)))
fd.write('DEF PY3 = {0}\n'.format(int(PY3)))
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
build_ext.build_extensions(self)
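# Hedged illustration: with the defaults above on Linux, the generated
# config.h contains lines like
#     // Autogenerated file for Kivy C configuration
#     #define __USE_RPI 0
#     #define __USE_OPENGL_ES2 1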
# -----------------------------------------------------------------------------
# extract version (simulate doc generation so kivy is not fully imported)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. We also use Cython's build_ext command for C extensions
# when Cython is installed.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
'build_factory': FactoryBuild,
'build_ext': KivyBuildExt}
try:
# add build rules for portable packages to cmdclass
if platform == 'win32':
from kivy.tools.packaging.win32.build import WindowsPortableBuild
cmdclass['build_portable'] = WindowsPortableBuild
elif platform == 'darwin':
from kivy.tools.packaging.osx.build import OSXPortableBuild
cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
    print('User distribution detected, skipping portable build commands.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi'):
pass
elif platform == 'win32':
print('Windows platform detected, force GLEW usage.')
c_options['use_glew'] = True
else:
# searching GLES headers
default_header_dirs = ['/usr/include', '/usr/local/include']
found = False
for hdir in default_header_dirs:
filename = join(hdir, 'GLES2', 'gl2.h')
if exists(filename):
found = True
print('Found GLES 2.0 headers at {0}'.format(filename))
break
if not found:
        print('WARNING: GLES 2.0 headers were not found.')
        print('Falling back to desktop OpenGL headers.')
c_options['use_opengl_es2'] = False
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detected, using it.')
print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
print('Activate SDL compilation.')
c_options['use_ios'] = True
c_options['use_sdl'] = True
# detect gstreamer, only on desktop
if platform not in ('ios', 'android'):
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
c_options['use_gstreamer'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename):
filename = filename.replace(sep, '/')
pyx = '.'.join(filename.split('.')[:-1])
pyxl = pyx.split('/')
while pyxl[0] != 'kivy':
pyxl.pop(0)
if pyxl[1] == 'kivy':
pyxl.pop(0)
return '.'.join(pyxl)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
'embedsignature': 'USE_EMBEDSIGNATURE' in environ}
        # XXX with pip, setuptools is imported before distutils and rewrites
        # our .pyx sources to .c, so cythonization doesn't happen. Force the
        # original sources again.
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for key, value in d2.items():
value = deepcopy(value)
if key in d1:
d1[key].extend(value)
else:
d1[key] = value
return d1
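# Hedged example: merge() deep-copies, so the inputs stay untouched:
#
#     merge({'libraries': ['GL']},
#           {'libraries': ['m'], 'include_dirs': ['/usr/include']})
#     # -> {'libraries': ['GL', 'm'], 'include_dirs': ['/usr/include']}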
def determine_base_flags():
flags = {
'libraries': [],
'include_dirs': [],
'extra_link_args': [],
'extra_compile_args': []}
if c_options['use_ios']:
sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
if not sysroot:
raise Exception('IOSSDKROOT is not set')
flags['include_dirs'] += [sysroot]
flags['extra_compile_args'] += ['-isysroot', sysroot]
flags['extra_link_args'] += ['-isysroot', sysroot]
elif platform == 'darwin':
v = os.uname()
if v[2] == '13.0.0':
sysroot = ('/Applications/Xcode5-DP.app/Contents/Developer'
'/Platforms/MacOSX.platform/Developer/SDKs'
'/MacOSX10.8.sdk/System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
return flags
def determine_gl_flags():
flags = {'libraries': []}
if platform == 'win32':
flags['libraries'] = ['opengl32']
elif platform == 'ios':
flags['libraries'] = ['GLESv2']
flags['extra_link_args'] = ['-framework', 'OpenGLES']
elif platform == 'darwin':
flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]
flags['extra_compile_args'] = ['-arch', osx_arch]
elif platform.startswith('freebsd'):
flags['include_dirs'] = ['/usr/local/include']
flags['extra_link_args'] = ['-L', '/usr/local/lib']
flags['libraries'] = ['GL']
elif platform.startswith('openbsd'):
flags['include_dirs'] = ['/usr/X11R6/include']
flags['extra_link_args'] = ['-L', '/usr/X11R6/lib']
flags['libraries'] = ['GL']
elif platform == 'android':
flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
flags['extra_link_args'] = ['-L', join(ndkplatform, 'usr', 'lib')]
flags['libraries'] = ['GLESv2']
elif platform == 'rpi':
flags['include_dirs'] = ['/opt/vc/include',
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['extra_link_args'] = ['-L', '/opt/vc/lib']
flags['libraries'] = ['GLESv2']
else:
flags['libraries'] = ['GL']
if c_options['use_glew']:
if platform == 'win32':
flags['libraries'] += ['glew32']
else:
flags['libraries'] += ['GLEW']
return flags
def determine_sdl():
flags = {}
if not c_options['use_sdl']:
return flags
flags['libraries'] = ['SDL', 'SDL_ttf', 'freetype', 'z', 'bz2']
flags['include_dirs'] = []
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
# Paths as per homebrew (modified formula to use hg checkout)
if c_options['use_ios']:
        # Note: on iOS, SDL is already loaded by the launcher/main.m, so
        # adding it here would cause duplicate-symbol errors (libSDL.a would
        # be included in both the main.m binary and text_sdlttf.so).
        # As a result, we link without SDL explicitly and add
        # -undefined dynamic_lookup
        # (/tito)
flags['libraries'] = ['SDL_ttf', 'freetype', 'bz2']
flags['include_dirs'] += [
join(kivy_ios_root, 'build', 'include'),
join(kivy_ios_root, 'build', 'include', 'SDL'),
join(kivy_ios_root, 'build', 'include', 'freetype')]
flags['extra_link_args'] += [
'-L', join(kivy_ios_root, 'build', 'lib'),
'-undefined', 'dynamic_lookup']
else:
flags['include_dirs'] = ['/usr/local/include/SDL']
flags['extra_link_args'] += ['-L/usr/local/lib/']
if platform == 'ios':
flags['extra_link_args'] += [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'MobileCoreServices',
'-framework', 'ImageIO']
elif platform == 'darwin':
flags['extra_link_args'] += [
'-framework', 'ApplicationServices']
return flags
def determine_graphics_pxd():
flags = {'depends': [join(dirname(__file__), 'kivy', x) for x in [
'graphics/buffer.pxd',
'graphics/c_opengl.pxd',
'graphics/c_opengl_debug.pxd',
'graphics/compiler.pxd',
'graphics/context_instructions.pxd',
'graphics/fbo.pxd',
'graphics/instructions.pxd',
'graphics/opengl_utils.pxd',
'graphics/shader.pxd',
'graphics/texture.pxd',
'graphics/transformation.pxd',
'graphics/vbo.pxd',
'graphics/vertex.pxd']]}
return flags
base_flags = determine_base_flags()
gl_flags = determine_gl_flags()
graphics_flags = determine_graphics_pxd()
# -----------------------------------------------------------------------------
# sources to compile
sources = {
'_event.pyx': base_flags,
'properties.pyx': base_flags,
'graphics/buffer.pyx': base_flags,
'graphics/context.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/c_opengl_debug.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/compiler.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/context_instructions.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/fbo.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/gl_instructions.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/instructions.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/opengl.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/opengl_utils.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/shader.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/stencil_instructions.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/texture.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/transformation.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/vbo.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/vertex.pyx': merge(
base_flags, gl_flags, graphics_flags),
'graphics/vertex_instructions.pyx': merge(
base_flags, gl_flags, graphics_flags)}
if c_options['use_sdl']:
sdl_flags = determine_sdl()
sources['core/window/sdl.pyx'] = merge(
base_flags, gl_flags, sdl_flags)
sources['core/text/text_sdlttf.pyx'] = merge(
base_flags, gl_flags, sdl_flags)
sources['core/audio/audio_sdl.pyx'] = merge(
base_flags, sdl_flags)
if platform in ('darwin', 'ios'):
# activate ImageIO provider for our core image
if platform == 'ios':
osx_flags = {'extra_link_args': [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'ImageIO']}
else:
osx_flags = {'extra_link_args': [
'-framework', 'ApplicationServices']}
sources['core/image/img_imageio.pyx'] = merge(
base_flags, osx_flags)
if c_options['use_avfoundation']:
import platform as _platform
        # use a tuple so the >= comparison against (10, 7) is well defined
        mac_ver = tuple(int(x) for x in _platform.mac_ver()[0].split('.')[:2])
        if mac_ver >= (10, 7):
osx_flags = {
'extra_link_args': ['-framework', 'AVFoundation'],
'extra_compile_args': ['-ObjC++'],
'depends': [join(dirname(__file__),
'kivy/core/camera/camera_avfoundation_implem.m')]}
sources['core/camera/camera_avfoundation.pyx'] = merge(
base_flags, osx_flags)
else:
print('AVFoundation cannot be used, OSX >= 10.7 is required')
if c_options['use_rpi']:
sources['lib/vidcore_lite/egl.pyx'] = merge(
base_flags, gl_flags)
sources['lib/vidcore_lite/bcm.pyx'] = merge(
base_flags, gl_flags)
#sources['core/window/window_egl_rpi.pyx'] = merge(
# base_flags, gl_flags)
if c_options['use_x11']:
sources['core/window/window_x11.pyx'] = merge(
base_flags, gl_flags, graphics_flags, {
'depends': [join(dirname(__file__),
'kivy/core/window/window_x11_core.c')],
'libraries': ['Xrender', 'X11']
})
if c_options['use_gstreamer']:
sources['lib/gstplayer/_gstplayer.pyx'] = merge(
base_flags, gst_flags)
# -----------------------------------------------------------------------------
# extension modules
def get_extensions_from_sources(sources):
ext_modules = []
if environ.get('KIVY_FAKE_BUILDEXT'):
print('Fake build_ext asked, will generate only .h/.c')
return ext_modules
for pyx, flags in sources.items():
pyx = join(dirname(__file__), 'kivy', pyx)
if not have_cython:
pyx = '%s.c' % pyx[:-4]
depends = []
else:
depends = flags.pop('depends', [])
module_name = get_modulename_from_file(pyx)
flags_clean = {}
for key, value in flags.items():
if len(value):
flags_clean[key] = value
ext_modules.append(CythonExtension(module_name,
[pyx] + depends, **flags_clean))
return ext_modules
ext_modules = get_extensions_from_sources(sources)
# -----------------------------------------------------------------------------
# automatically detect data files
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv')
for root, subFolders, files in walk('examples'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in examples_allowed_ext:
continue
filename = join(root, fn)
directory = '%s%s' % (data_file_prefix, dirname(filename))
        if directory not in examples:
examples[directory] = []
examples[directory].append(filename)
# -----------------------------------------------------------------------------
# setup !
setup(
name='Kivy',
version=kivy.__version__,
author='Kivy Crew',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org/',
license='MIT',
description=(
'A software library for rapid development of '
'hardware-accelerated multitouch applications.'),
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=[
'kivy',
'kivy.adapters',
'kivy.core',
'kivy.core.audio',
'kivy.core.camera',
'kivy.core.clipboard',
'kivy.core.image',
'kivy.core.gl',
'kivy.core.spelling',
'kivy.core.text',
'kivy.core.video',
'kivy.core.window',
'kivy.effects',
'kivy.ext',
'kivy.graphics',
'kivy.input',
'kivy.input.postproc',
'kivy.input.providers',
'kivy.lib',
'kivy.lib.osc',
'kivy.lib.gstplayer',
'kivy.lib.vidcore_lite',
'kivy.modules',
'kivy.network',
'kivy.storage',
'kivy.tools',
'kivy.tools.packaging',
'kivy.tools.packaging.pyinstaller_hooks',
'kivy.tools.highlight',
'kivy.extras',
'kivy.tools.extensions',
'kivy.uix', ],
package_dir={'kivy': 'kivy'},
package_data={'kivy': [
'data/*.kv',
'data/*.json',
'data/fonts/*.ttf',
'data/images/*.png',
'data/images/*.jpg',
'data/images/*.gif',
'data/images/*.atlas',
'data/keyboards/*.json',
'data/logo/*.png',
'data/glsl/*.png',
'data/glsl/*.vs',
'data/glsl/*.fs',
'tools/highlight/*.vim',
'tools/highlight/*.el',
'tools/packaging/README.txt',
'tools/packaging/win32/kivy.bat',
'tools/packaging/win32/kivyenv.sh',
'tools/packaging/win32/README.txt',
'tools/packaging/osx/Info.plist',
'tools/packaging/osx/InfoPlist.strings',
'tools/packaging/osx/kivy.sh']},
data_files=list(examples.items()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: User Interfaces'],
dependency_links=[
'https://github.com/kivy-garden/garden/archive/master.zip'],
install_requires=['Kivy-Garden==0.1.1'])
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 23:32:57 2021
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.pytest import pytest_warns
from statsmodels.compat.scipy import SP_LT_15
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
from scipy import stats
from statsmodels.distributions.copula.archimedean import (
ArchimedeanCopula,
ClaytonCopula,
FrankCopula,
GumbelCopula,
)
from statsmodels.distributions.copula.copulas import CopulaDistribution
import statsmodels.distributions.copula.depfunc_ev as trev
from statsmodels.distributions.copula.elliptical import (
GaussianCopula,
StudentTCopula,
)
from statsmodels.distributions.copula.extreme_value import (
ExtremeValueCopula,
copula_bv_ev,
)
from statsmodels.distributions.copula.other_copulas import IndependenceCopula
import statsmodels.distributions.copula.transforms as tra
from statsmodels.distributions.tools import (
approx_copula_pdf,
frequencies_fromdata,
)
from statsmodels.tools.numdiff import approx_fprime_cs, approx_hess
uniform = stats.uniform
ev_list = [
[trev.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],
[trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],
# note evd has asymmetry reversed, interchange variables
[trev.transform_tawn2, 0.5, 0.9, (0.25, 0.05), 0.464357480263932],
[trev.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.4916117128670654],
[trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.48340673415789],
# note evd has parameter for hr 1/lmbda (inverse of our parameter)
[trev.transform_hr, 0.5, 0.9, (2,), 0.4551235014298542],
[trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1 / 0.25), 0.4543698299835434],
[trev.transform_joe, 0.9, 0.5, (0.5, 0.75, 1 / 0.25), 0.4539773435983587],
# tev is against R `copula` package
# > cop = tevCopula(0.8, df = 4)
# > pCopula(c(0.5, 0.75), cop)
# [1] 0.456807960674953
# > pCopula(c(0.5, 0.9), cop)
# [1] 0.4911039761533587
[trev.transform_tev, 0.5, 0.75, (0.8, 4), 0.456807960674953],
[trev.transform_tev, 0.5, 0.9, (0.8, 4), 0.4911039761533587],
]
ev_dep_list = [
# [trev.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],
[trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117,
[0.8952847075210475, 0.8535533905932737, 0.8952847075210475]],
# abvevd(c(0.25, 0.5, 0.75), dep=0.25, asy = c(0.5, 0.75), model = "alog")
[trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.25), 0.4724570876035117,
[0.8753426223607659, 0.7672861240893745, 0.8182268471629245]],
[trev.transform_tawn2, 0.4, 0.9, (0.3, 0.2), 0,
[0.8968750000000001, 0.8500000000000000, 0.8781249999999999]],
# # note evd has asymmetry reversed, interchange variables - NOT anymore
# [trev.transform_tawn2, 0.9, 0.5, (0.25, 0.05), 0.464357480263932],
# [trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4916117128670654],
# [trev.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.48340673415789],
# # note evd has parameter for hr 1/lmbda (inverse of our parameter)
[trev.transform_hr, 0.5, 0.9, (1/2,), 0.4551235014298542,
[0.7774638908611127, 0.6914624612740130, 0.7774638908611127]],
# [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.4543698299835434],
# [trev.transform_joe, 0.9, 0.5, (0.5, 0.75, 1/0.25), 0.4539773435983587],
# > abvevd(c(0.25, 0.5, 0.75), dep=0.75, asy=c(0.5, 0.75), model="aneglog")
# [1] 0.9139915932031195 0.8803412942173715 0.8993537417026507
[trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.75), 0.,
[0.9139915932031195, 0.8803412942173715, 0.8993537417026507]]
]
cop_list = [
[tra.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225, 0.9257812360337806],
[tra.TransfGumbel, 0.5, 0.9, (2,), 0.4960348880595387, 0.3973548776136501],
[tra.TransfClayton, 0.5, 0.9, (2,), 0.485954322440435, 0.8921974147432954],
[tra.TransfIndep, 0.5, 0.5, (), 0.25, 1],
]
gev_list = [
# [cop.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],
# > pbvevd(c(0.5,0.9), dep = 0.25, asy = c(0.5, 0.5), model = "alog")
# [trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.25),
# 0.4386367545837274, 0.12227570158361],
[trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.25),
0.4868879662205026, 0.4646154226541540, 0.1897142141905908],
[trev.transform_tawn2, 0.4, 0.9, (0.3, 0.2),
0.3838690483829361, 0.3989785485000293, 0.1084278364284748],
# [trev.transform_tawn2, 0.5, 0.5, (0.5, 0.25), 0.387629940606913,
# 0.1383277275273335],
# [trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4519820720233402,
# 0.1162545305128522], # fails in pdf
# note evd has parameter for hr 1/lmbda (inverse of our parameter)
[trev.transform_hr, 0.4, 0.9, (2,),
0.36459381872178737, 0.34879372499897571, 0.09305880295825367],
# [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.3700584213780548,
# 0.08992436735088952],
[trev.transform_joe, 0.4, 0.9, (0.5, 0.75, 1/0.25),
0.36391125216656162, 0.34752631779552950, 0.09316705199822513],
]
def check_cop_rvs(cop, rvs=None, nobs=2000, k=10, use_pdf=True):
if rvs is None:
rvs = cop.rvs(nobs)
freq = frequencies_fromdata(rvs, k, use_ranks=True)
if use_pdf:
pdfg = approx_copula_pdf(cop, k_bins=k, force_uniform=True)
count_pdf = pdfg * nobs
else:
# use copula cdf if available
raise NotImplementedError
    mask = count_pdf < 2
    if mask.sum() > 5:
        # pool the bins with expected count below 2 into one remainder bin
        cp = count_pdf[mask]
        cp = np.concatenate([cp, [nobs - cp.sum()]])
        fr = freq[mask]
        fr = np.concatenate([fr, [nobs - fr.sum()]])
    else:
        fr = freq.ravel()
        cp = count_pdf.ravel()
    chi2_test = stats.chisquare(fr, cp)
return chi2_test, rvs
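# Hedged usage sketch for the helper above: sample from a copula and check
# the binned frequencies against the (approximate) pdf with a chi-square test.
#
#     chi2, rvs = check_cop_rvs(GaussianCopula(corr=[[1., 0.8], [0.8, 1.]]))
#     assert chi2.pvalue > 0.01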
extrali = [
[trev.transform_tawn, 0.5, 0.9, (0.8, 0.5, 0.75), 0.4724570876035117],
[trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.5), 0.4724570876035117],
[trev.transform_tawn, 0.6, 0.4, (0.2, 0.7, 0.6), 0.4724570876035117],
]
@pytest.mark.parametrize("case", ev_list + extrali)
def test_ev_copula(case):
# check ev copulas, cdf and transform against R `evd` package
ev_tr, v1, v2, args, res1 = case
res = copula_bv_ev([v1, v2], ev_tr, args=args)
# assert_allclose(res, res1, rtol=1e-13)
# check derivatives of dependence function
if ev_tr in (trev.transform_bilogistic, trev.transform_tev):
return
d1_res = approx_fprime_cs(np.array([v1, v2]), ev_tr.evaluate, args=args)
d1_res = np.diag(d1_res)
d1 = ev_tr.deriv(np.array([v1, v2]), *args)
assert_allclose(d1, d1_res, rtol=1e-8)
d1_res = approx_hess(np.array([0.5]), ev_tr.evaluate, args=args)
d1_res = np.diag(d1_res)
d1 = ev_tr.deriv2(0.5, *args)
assert_allclose(d1, d1_res, rtol=1e-7)
@pytest.mark.parametrize("case", ev_dep_list)
def test_ev_dep(case):
ev_tr, v1, v2, args, res1, res2 = case # noqa
t = np.array([0.25, 0.5, 0.75])
df = ev_tr(t, *args)
assert_allclose(df, res2, rtol=1e-13)
@pytest.mark.parametrize("case", cop_list)
def test_copulas(case):
# check ev copulas, cdf and transform against R `copula` package
cop_tr, v1, v2, args, cdf2, pdf2 = case
ca = ArchimedeanCopula(cop_tr())
cdf1 = ca.cdf([v1, v2], args=args)
pdf1 = ca.pdf([v1, v2], args=args)
assert_allclose(cdf1, cdf2, rtol=1e-13)
assert_allclose(pdf1, pdf2, rtol=1e-13)
logpdf1 = ca.logpdf([v1, v2], args=args)
assert_allclose(logpdf1, np.log(pdf2), rtol=1e-13)
@pytest.mark.parametrize("case", ev_list)
def test_ev_copula_distr(case):
# check ev copulas, cdf and transform against R `evd` package
ev_tr, v1, v2, args, res1 = case
u = [v1, v2]
res = copula_bv_ev(u, ev_tr, args=args)
assert_allclose(res, res1, rtol=1e-13)
ev = ExtremeValueCopula(ev_tr)
cdf1 = ev.cdf(u, args)
assert_allclose(cdf1, res1, rtol=1e-13)
cev = CopulaDistribution(ev, [uniform, uniform], cop_args=args)
cdfd = cev.cdf(np.array(u), cop_args=args)
assert_allclose(cdfd, res1, rtol=1e-13)
assert cdfd.shape == ()
# using list u
cdfd = cev.cdf(u, cop_args=args)
assert_allclose(cdfd, res1, rtol=1e-13)
assert cdfd.shape == ()
# check vector values for u
# bilogistic is not vectorized, uses integrate.quad
if ev_tr != trev.transform_bilogistic:
cdfd = cev.cdf(np.array(u) * np.ones((3, 1)), cop_args=args)
assert_allclose(cdfd, res1, rtol=1e-13)
assert cdfd.shape == (3,)
@pytest.mark.parametrize("case", cop_list)
def test_copulas_distr(case):
# check ev copulas, cdf and transform against R `copula` package
cop_tr, v1, v2, args, cdf2, pdf2 = case
u = [v1, v2]
ca = ArchimedeanCopula(cop_tr())
cdf1 = ca.cdf(u, args=args)
pdf1 = ca.pdf(u, args=args)
cad = CopulaDistribution(ca, [uniform, uniform], cop_args=args)
cdfd = cad.cdf(np.array(u), cop_args=args)
assert_allclose(cdfd, cdf1, rtol=1e-13)
assert cdfd.shape == ()
# check pdf
pdfd = cad.pdf(np.array(u), cop_args=args)
assert_allclose(pdfd, pdf1, rtol=1e-13)
    assert pdfd.shape == ()
# using list u
cdfd = cad.cdf(u, cop_args=args)
assert_allclose(cdfd, cdf1, rtol=1e-13)
assert cdfd.shape == ()
assert_allclose(cdf1, cdf2, rtol=1e-13)
assert_allclose(pdf1, pdf2, rtol=1e-13)
# check vector values for u
cdfd = cad.cdf(np.array(u) * np.ones((3, 1)), cop_args=args)
assert_allclose(cdfd, cdf2, rtol=1e-13)
assert cdfd.shape == (3,)
# check mv, check at marginal cdf
cdfmv = ca.cdf([v1, v2, 1], args=args)
assert_allclose(cdfmv, cdf1, rtol=1e-13)
assert cdfd.shape == (3,)
@pytest.mark.parametrize("case", gev_list)
def test_gev_genextreme(case):
gev = stats.genextreme(0)
    # check ev copulas, cdf and transform against R `evd` package
ev_tr, v1, v2, args, res0, res1, res2 = case
y = [v1, v2]
u = gev.cdf(y)
res = copula_bv_ev(u, ev_tr, args=args)
assert_allclose(res, res1, rtol=1e-13)
ev = ExtremeValueCopula(ev_tr)
# evaluated at using u = y
cdf1 = ev.cdf(y, args)
assert_allclose(cdf1, res0, rtol=1e-13)
# evaluated at transformed u = F(y)
cdf1 = ev.cdf(u, args)
assert_allclose(cdf1, res1, rtol=1e-13)
cev = CopulaDistribution(ev, [gev, gev], cop_args=args)
cdfd = cev.cdf(np.array(y), cop_args=args)
assert_allclose(cdfd, res1, rtol=1e-13)
pdfd = cev.pdf(np.array(y), cop_args=args)
assert_allclose(pdfd, res2, rtol=1e-13)
class TestFrank:
def test_basic(self):
case = [tra.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225,
0.9257812360337806]
cop_tr, v1, v2, args, cdf2, pdf2 = case
cop = FrankCopula()
pdf1 = cop.pdf([v1, v2], args=args)
assert_allclose(pdf1, pdf2, rtol=1e-13)
logpdf1 = cop.logpdf([v1, v2], args=args)
assert_allclose(logpdf1, np.log(pdf2), rtol=1e-13)
cdf1 = cop.cdf([v1, v2], args=args)
assert_allclose(cdf1, cdf2, rtol=1e-13)
assert isinstance(cop.transform, cop_tr)
# round trip conditional, no verification
u = [0.6, 0.5]
cdfc = cop.cdfcond_2g1(u, args=args)
ppfc = cop.ppfcond_2g1(cdfc, [0.6], args=args)
assert_allclose(ppfc, u[1], rtol=1e-13)
# The reference results are coming from the R package Copula.
# See ``copula_r_tests.rst`` for more details.
class CheckCopula:
"""Generic tests for copula."""
copula = None
dim = None
u = np.array([[0.33706249, 0.6075078],
[0.62232507, 0.06241089],
[0.2001457, 0.54027684],
[0.77166391, 0.40610225],
[0.98534253, 0.99212789],
[0.72755898, 0.25913165],
[0.05943888, 0.61044613],
[0.0962475, 0.67585563],
[0.35496733, 0.79584436],
[0.44513594, 0.23050014]])
pdf_u = None
cdf_u = None
def _est_visualization(self):
sample = self.copula.rvs(10000)
assert sample.shape == (10000, 2)
# h = sns.jointplot(sample[:, 0], sample[:, 1], kind='hex')
# h.set_axis_labels('X1', 'X2', fontsize=16)
def test_pdf(self):
pdf_u_test = self.copula.pdf(self.u)
assert_array_almost_equal(self.pdf_u, pdf_u_test)
def test_cdf(self):
cdf_u_test = self.copula.cdf(self.u)
assert_array_almost_equal(self.cdf_u, cdf_u_test)
def test_validate_params(self):
pass
def test_rvs(self):
nobs = 2000
rng = np.random.RandomState(27658622)
self.rvs = rvs = self.copula.rvs(nobs, random_state=rng)
assert rvs.shape == (nobs, 2)
assert_array_almost_equal(
np.mean(rvs, axis=0), np.repeat(0.5, self.dim), decimal=2
)
# check empirical quantiles, uniform
q0 = np.percentile(rvs, [25, 50, 75], axis=0)
q1 = np.repeat(np.array([[0.25, 0.5, 0.75]]).T, 2, axis=1)
assert_allclose(q0, q1, atol=0.025)
tau = stats.kendalltau(*rvs.T)[0]
tau_cop = self.copula.tau()
assert_allclose(tau, tau_cop, rtol=0.08, atol=0.005)
if isinstance(self.copula, IndependenceCopula):
# skip rest, no `_arg_from_tau` in IndependenceCopula
return
theta = self.copula.fit_corr_param(rvs)
theta_cop = getattr(self.copula, "theta", None)
if theta_cop is None:
# elliptical
theta_cop = self.copula.corr[0, 1]
assert_allclose(theta, theta_cop, rtol=0.1, atol=0.005)
class CheckModernCopula(CheckCopula):
@pytest.mark.parametrize(
"seed", ["random_state", "generator", "qmc", None, 0]
)
def test_seed(self, seed):
if SP_LT_15 and seed in ("generator", 0):
pytest.xfail(reason="Generator not supported for SciPy <= 1.3")
if seed == "random_state":
seed1 = np.random.RandomState(0)
seed2 = np.random.RandomState(0)
elif seed == "generator":
seed1 = np.random.default_rng(0)
seed2 = 0
elif seed is None:
seed1 = None
singleton = np.random.mtrand._rand
seed2 = np.random.RandomState()
seed2.set_state(singleton.get_state())
elif seed == "qmc":
if not hasattr(stats, "qmc"):
pytest.skip("QMC not available")
else:
pytest.xfail("QMC not working")
seed1 = stats.qmc.Halton(2)
seed2 = stats.qmc.Halton(2)
else:
seed1 = 0
seed2 = np.random.default_rng(0)
nobs = 2000
expected_warn = None if seed1 is not None else FutureWarning
with pytest_warns(expected_warn):
rvs1 = self.copula.rvs(nobs, random_state=seed1)
rvs2 = self.copula.rvs(nobs, random_state=seed2)
assert_allclose(rvs1, rvs2)
class TestIndependenceCopula(CheckCopula):
copula = IndependenceCopula()
dim = 2
pdf_u = np.ones(10)
cdf_u = np.prod(CheckCopula.u, axis=1)
class TestGaussianCopula(CheckCopula):
copula = GaussianCopula(corr=[[1.0, 0.8], [0.8, 1.0]])
dim = 2
pdf_u = [1.03308741, 0.06507279, 0.72896012, 0.65389439, 16.45012399,
0.34813218, 0.06768115, 0.08168840, 0.40521741, 1.26723470]
cdf_u = [0.31906854, 0.06230196, 0.19284669, 0.39952707, 0.98144792,
0.25677003, 0.05932818, 0.09605404, 0.35211017, 0.20885480]
def test_rvs(self):
# copied from student t test,
# currently inconsistent with non-elliptical copulas
super().test_rvs()
chi2t, rvs = check_cop_rvs(
self.copula, rvs=self.rvs, nobs=2000, k=10, use_pdf=True
)
assert chi2t.pvalue > 0.1
tau = stats.kendalltau(*rvs.T)[0]
tau_cop = self.copula.tau()
assert_allclose(tau, tau_cop, rtol=0.05)
class TestStudentTCopula(CheckCopula):
copula = StudentTCopula(corr=[[1.0, 0.8], [0.8, 1.0]], df=2)
dim = 2
pdf_u = [0.8303065, 0.1359839, 0.5157746, 0.4776421, 26.2173959,
0.3070661, 0.1349173, 0.1597064, 0.3303230, 1.0482301]
cdf_u = [0.31140349, 0.05942746, 0.18548601, 0.39143974, 0.98347259,
0.24894028, 0.05653947, 0.09210693, 0.34447385, 0.20429882]
def test_cdf(self):
pytest.skip("Not implemented.")
def test_rvs(self):
super().test_rvs()
chi2t, rvs = check_cop_rvs(
self.copula, rvs=self.rvs, nobs=2000, k=10, use_pdf=True
)
assert chi2t.pvalue > 0.1
tau = stats.kendalltau(*rvs.T)[0]
tau_cop = self.copula.tau()
assert_allclose(tau, tau_cop, rtol=0.05)
class TestClaytonCopula(CheckModernCopula):
copula = ClaytonCopula(theta=1.2)
dim = 2
pdf_u = [1.0119836, 0.2072728, 0.8148839, 0.9481976, 2.1419659,
0.6828507, 0.2040454, 0.2838497, 0.8197787, 1.1096360]
cdf_u = [0.28520375, 0.06101690, 0.17703377, 0.36848218, 0.97772088,
0.24082057, 0.05811908, 0.09343934, 0.33012582, 0.18738753]
class TestFrankCopula(CheckModernCopula):
copula = FrankCopula(theta=3)
dim = 2
pdf_u = [0.9646599, 0.5627195, 0.8941964, 0.8364614, 2.9570945,
0.6665601, 0.5779906, 0.5241333, 0.7156741, 1.1074024]
cdf_u = [0.27467496, 0.05492539, 0.15995939, 0.36750702, 0.97782283,
0.23412757, 0.05196265, 0.08676979, 0.32803721, 0.16320730]
class TestGumbelCopula(CheckModernCopula):
copula = GumbelCopula(theta=1.5)
dim = 2
pdf_u = [1.0391696, 0.6539579, 0.9878446, 0.8679504, 16.6030932,
0.7542073, 0.6668307, 0.6275887, 0.7477991, 1.1564864]
cdf_u = [0.27194634, 0.05484380, 0.15668190, 0.37098420, 0.98176346,
0.23422865, 0.05188260, 0.08659615, 0.33086960, 0.15803914]
|
|
"""Test harness for the zipapp module."""
import io
import pathlib
import stat
import sys
import tempfile
import unittest
import zipapp
import zipfile
from unittest.mock import patch
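# A hedged one-line summary of the API exercised below:
#     zipapp.create_archive('myapp', 'myapp.pyz', interpreter='python')
#     zipapp.get_interpreter('myapp.pyz')  # -> 'python'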
class ZipAppTest(unittest.TestCase):
"""Test zipapp module functionality."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def test_create_archive(self):
# Test packing a directory.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertTrue(target.is_file())
def test_create_archive_with_pathlib(self):
# Test packing a directory using Path objects for source and target.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
self.assertTrue(target.is_file())
def test_create_archive_with_subdirs(self):
# Test packing a directory includes entries for subdirectories.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'foo').mkdir()
(source / 'bar').mkdir()
(source / 'foo' / '__init__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target)
target.seek(0)
with zipfile.ZipFile(target, 'r') as z:
self.assertIn('foo/', z.namelist())
self.assertIn('bar/', z.namelist())
def test_create_archive_default_target(self):
# Test packing a directory to the default name.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
zipapp.create_archive(str(source))
expected_target = self.tmpdir / 'source.pyz'
self.assertTrue(expected_target.is_file())
def test_no_main(self):
# Test that packing a directory with no __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target))
def test_main_and_main_py(self):
# Test that supplying a main argument with __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
def test_main_written(self):
# Test that the __main__.py is written correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertIn('__main__.py', z.namelist())
self.assertIn(b'pkg.mod.fn()', z.read('__main__.py'))
def test_main_only_written_once(self):
# Test that we don't write multiple __main__.py files.
# The initial implementation had this bug; zip files allow
# multiple entries with the same name
source = self.tmpdir / 'source'
source.mkdir()
# Write 2 files, as the original bug wrote __main__.py
# once for each file written :-(
# See http://bugs.python.org/review/23491/diff/13982/Lib/zipapp.py#newcode67Lib/zipapp.py:67
# (line 67)
(source / 'foo.py').touch()
(source / 'bar.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertEqual(1, z.namelist().count('__main__.py'))
def test_main_validation(self):
# Test that invalid values for main are rejected.
source = self.tmpdir / 'source'
source.mkdir()
target = self.tmpdir / 'source.pyz'
problems = [
'', 'foo', 'foo:', ':bar', '12:bar', 'a.b.c.:d',
'.a:b', 'a:b.', 'a:.b', 'a:silly name'
]
for main in problems:
with self.subTest(main=main):
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main=main)
def test_default_no_shebang(self):
# Test that no shebang line is written to the target by default.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
with target.open('rb') as f:
self.assertNotEqual(f.read(2), b'#!')
def test_custom_interpreter(self):
# Test that a shebang line with a custom interpreter is written
# correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
with target.open('rb') as f:
self.assertEqual(f.read(2), b'#!')
self.assertEqual(b'python\n', f.readline())
def test_pack_to_fileobj(self):
# Test that we can pack to a file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
self.assertTrue(target.getvalue().startswith(b'#!python\n'))
def test_read_shebang(self):
# Test that we can read the shebang line correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertEqual(zipapp.get_interpreter(str(target)), 'python')
def test_read_missing_shebang(self):
# Test that reading the shebang line of a file without one returns None.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertEqual(zipapp.get_interpreter(str(target)), None)
def test_modify_shebang(self):
# Test that we can change the shebang of a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(str(new_target)), 'python2.7')
def test_write_shebang_to_fileobj(self):
# Test that we can change the shebang of a file, writing the result to a
# file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = io.BytesIO()
zipapp.create_archive(str(target), new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_read_from_pathobj(self):
        # Test that we can copy an archive using a pathlib.Path object
# for the source.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target1 = self.tmpdir / 'target1.pyz'
target2 = self.tmpdir / 'target2.pyz'
zipapp.create_archive(source, target1, interpreter='python')
zipapp.create_archive(target1, target2, interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(target2), 'python2.7')
def test_read_from_fileobj(self):
# Test that we can copy an archive using an open file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
temp_archive = io.BytesIO()
zipapp.create_archive(str(source), temp_archive, interpreter='python')
new_target = io.BytesIO()
temp_archive.seek(0)
zipapp.create_archive(temp_archive, new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_remove_shebang(self):
# Test that we can remove the shebang from a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter=None)
self.assertEqual(zipapp.get_interpreter(str(new_target)), None)
def test_content_of_copied_archive(self):
# Test that copying an archive doesn't corrupt it.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
new_target = io.BytesIO()
target.seek(0)
zipapp.create_archive(target, new_target, interpreter=None)
new_target.seek(0)
with zipfile.ZipFile(new_target, 'r') as z:
self.assertEqual(set(z.namelist()), {'__main__.py'})
# (Unix only) tests that archives with shebang lines are made executable
    @unittest.skipIf(sys.platform in ('win32', 'uwp'),
'Windows does not support an executable bit')
def test_shebang_is_executable(self):
# Test that an archive with a shebang line is made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertTrue(target.stat().st_mode & stat.S_IEXEC)
@unittest.skipIf(sys.platform == 'win32',
'Windows does not support an executable bit')
def test_no_shebang_is_not_executable(self):
# Test that an archive with no shebang line is not made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter=None)
self.assertFalse(target.stat().st_mode & stat.S_IEXEC)
class ZipAppCmdlineTest(unittest.TestCase):
"""Test zipapp module command line API."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def make_archive(self):
        # Create a simple archive for the command-line tests to use.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
return target
def test_cmdline_create(self):
# Test the basic command line API.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
args = [str(source)]
zipapp.main(args)
target = source.with_suffix('.pyz')
self.assertTrue(target.is_file())
def test_cmdline_copy(self):
# Test copying an archive.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target)]
zipapp.main(args)
self.assertTrue(target.is_file())
def test_cmdline_copy_inplace(self):
# Test copying an archive in place fails.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(original)]
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
        # Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
def test_cmdline_copy_change_main(self):
# Test copying an archive doesn't allow changing __main__.py.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target), '-m', 'foo:bar']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
        # Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
def test_info_command(self, mock_stdout):
# Test the output of the info command.
target = self.make_archive()
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
        # Program should exit with a zero return code.
self.assertEqual(cm.exception.code, 0)
self.assertEqual(mock_stdout.getvalue(), "Interpreter: <none>\n")
def test_info_error(self):
# Test the info command fails when the archive does not exist.
target = self.tmpdir / 'dummy.pyz'
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
        # Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
if __name__ == "__main__":
unittest.main()
|
|
import csv
import requests
import re
from sys import argv
from bs4 import BeautifulSoup
import dateutil.parser
from os import system
from pathlib import PurePosixPath
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path_1 = 'sale_results.asp'
report_path_2 = 'sale%20results/'
strip_char = ';,. \n\t'
temp_raw = scrape_util.ReportRaw(argv, prefix)
def get_sale_date(this_report):
match = re.search(r'(.+?)([0-9]+)_([0-9]+)', this_report)
date_string = ' '.join([match.group(i) for i in range(1, 4)])
sale_date = dateutil.parser.parse(date_string, fuzzy=True)
return sale_date
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
has_price = re.search(r'[0-9]+\.[0-9]{2}', this_line)
is_not_succinct = len(re.split(r'\s{2,}', this_line)) > 3
return bool(has_price and is_not_succinct)
def is_number(string):
"""Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
if string:
string = re.sub(r'\$|[,-/#]|cw?t?|he?a?d?', '', string, flags = re.IGNORECASE)
try:
float(string)
result = True
except ValueError:
result = False
else:
result = False
return result
def get_sale_head(line):
sale_head = ''
for this_line in line:
match = re.search(r'head *sold ?: *([0-9,]+)', this_line, re.IGNORECASE)
if match:
sale_head = match.group(1).replace(',','')
break
if not match:
match = re.search(r'([0-9,]+) *head', this_line, re.IGNORECASE)
if match:
sale_head = match.group(1).replace(',','')
break
return sale_head
def get_sale_location(word):
"""Convert address strings into a list of address components."""
sale_location = ' '.join(word)
if ',' in sale_location:
sale_location = sale_location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + r')$', sale_location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [sale_location]
return sale_location
def get_sale(word):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
number_word = [idx for idx, val in enumerate(word) if is_number(val)]
name_location = ' '.join(word[0:number_word[0]])
location_match = re.search(r'\[(.*)\]', name_location,re.IGNORECASE)
if location_match:
sale_location = [location_match.group(1).strip()]
consignor_name = name_location.replace(location_match.group(),'')
sale_location = get_sale_location(sale_location)
else:
location_incomplete_match = re.search(r'\[(.*)', name_location,re.IGNORECASE)
if location_incomplete_match:
consignor_name = name_location.replace(location_incomplete_match.group(),'')
if re.search(scrape_util.state + r'$', location_incomplete_match.group(1)):
sale_location = [location_incomplete_match.group(1).strip()]
sale_location = get_sale_location(sale_location)
else:
sale_location = []
else:
consignor_name = name_location
sale_location = []
sale = {
'consignor_name': consignor_name.strip(strip_char).title(),
'cattle_cattle': ' '.join(word[number_word[0]+1:number_word[1]])
}
if sale_location:
sale['consignor_city'] = sale_location.pop(0).strip(strip_char).title()
if sale_location:
sale['consignor_state'] = sale_location.pop().strip(strip_char)
head_string = word[number_word[0]].strip(strip_char).replace(',','')
try:
int(head_string)
sale['cattle_head'] = head_string
except ValueError:
pass
weight_string = word[number_word[1]].strip(strip_char).replace(',','').replace('#','')
try:
float(weight_string)
sale['cattle_avg_weight'] = weight_string
except ValueError:
pass
price_string = ''.join(word[number_word[2]:])
    match = re.search(r'([0-9,.]+) ?/?he?a?d?', price_string, re.IGNORECASE)
    key = 'cattle_price'
    if not match:
        match = re.search(r'([0-9,.]+) ?/?c?w?t?', price_string, re.IGNORECASE)
        key = 'cattle_price_cwt'
if match:
sale[key] = match.group(1).replace(',', '').strip(strip_char)
return sale
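# Worked example (hypothetical word list, matching the column split performed
# in write_sale below):
#
#   word = ['John Doe [Miles City, MT]', '12', 'blk strs', '560#', '148.00 cwt']
#   get_sale(word)
#   # -> {'consignor_name': 'John Doe', 'consignor_city': 'Miles City',
#   #     'consignor_state': 'MT', 'cattle_cattle': 'blk strs',
#   #     'cattle_head': '12', 'cattle_avg_weight': '560',
#   #     'cattle_price_cwt': '148.00'}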
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
for idx, this_line in enumerate(line):
if is_sale(this_line):
sale = this_default_sale.copy()
word = re.split(r'\s{2,}|(?<=\d)\s', this_line)
sale.update(get_sale(word))
writer.writerow(sale)
def main():
# get URLs for all reports
response = requests.get(
base_url + report_path_1,
headers = scrape_util.url_header,
)
soup = BeautifulSoup(response.content, 'lxml')
content = soup.find_all('table')
report = content[1].find_all('a')
# Identify existing reports
archive = scrape_util.ArchiveFolder(argv, prefix)
# write csv file for each historical report
for this_report in report:
this_report = this_report['href']
if 'consignments/' in this_report:
continue
this_report_stem = PurePosixPath(this_report).stem
sale_date = get_sale_date(this_report_stem)
io_name = archive.new_csv(sale_date)
if not io_name:
continue
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
})
# create temporary text file from downloaded pdf
this_report = this_report_stem + '.pdf'
response = requests.get(
base_url + report_path_2 + this_report,
headers = scrape_util.url_header,
)
with temp_raw.open('wb') as io:
io.write(response.content)
system(scrape_util.pdftotext.format(str(temp_raw)))
# read sale text into line list
temp_txt = temp_raw.with_suffix('.txt')
with temp_txt.open('r') as io:
line = [this_line.strip().replace('\xa0', ' ') for this_line in io]
temp_raw.clean()
sale_head = get_sale_head(line)
this_default_sale['sale_head'] = sale_head
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from distutils.version import LooseVersion
from typing import Any, Optional, List, Tuple
import numpy as np
import scipy.sparse as sp
from sklearn.multiclass import OneVsRestClassifier
from eli5.sklearn.unhashing import invert_hashing_and_fit, handle_hashing_vec
from eli5._feature_names import FeatureNames
def is_multiclass_classifier(clf):
# type: (Any) -> bool
"""
Return True if a classifier is multiclass or False if it is binary.
"""
return clf.coef_.shape[0] > 1
def is_multitarget_regressor(clf):
# type: (Any) -> bool
"""
Return True if a regressor is multitarget
or False if it predicts a single target.
"""
return len(clf.coef_.shape) > 1 and clf.coef_.shape[0] > 1
def is_probabilistic_classifier(clf):
# type: (Any) -> bool
""" Return True if a classifier can return probabilities """
if not hasattr(clf, 'predict_proba'):
return False
if isinstance(clf, OneVsRestClassifier):
        # OneVsRestClassifier exposes a predict_proba method even when the
        # wrapped estimator does not support it, so check the wrapped
        # estimator directly.
        return hasattr(clf.estimator, 'predict_proba')
return True
def predict_proba(estimator, X):
# type: (Any, Any) -> Optional[np.ndarray]
""" Return result of predict_proba, if an estimator supports it, or None.
"""
if is_probabilistic_classifier(estimator):
try:
proba, = estimator.predict_proba(X)
return proba
except NotImplementedError:
return None
else:
return None
def has_intercept(estimator):
# type: (Any) -> bool
""" Return True if an estimator has intercept fit. """
if hasattr(estimator, 'fit_intercept'):
return estimator.fit_intercept
if hasattr(estimator, 'intercept_'):
if estimator.intercept_ is None:
return False
# scikit-learn sets intercept to zero vector if it is not fit
return np.any(estimator.intercept_)
return False
def get_feature_names(clf, vec=None, bias_name='<BIAS>', feature_names=None,
num_features=None, estimator_feature_names=None):
    # type: (Any, Any, Optional[str], Any, Optional[int], Any) -> FeatureNames
"""
Return a FeatureNames instance that holds all feature names
and a bias feature.
    If vec is None or doesn't have a get_feature_names() method,
    features are named x0, x1, x2, etc.
"""
if not has_intercept(clf):
bias_name = None
if feature_names is None:
if vec and hasattr(vec, 'get_feature_names'):
return FeatureNames(vec.get_feature_names(), bias_name=bias_name)
else:
if estimator_feature_names is None:
num_features = num_features or get_num_features(clf)
return FeatureNames(
n_features=num_features,
unkn_template='x%d',
bias_name=bias_name
)
return FeatureNames(estimator_feature_names, bias_name=bias_name)
num_features = num_features or get_num_features(clf)
if isinstance(feature_names, FeatureNames):
if feature_names.n_features != num_features:
raise ValueError("feature_names has a wrong n_features: "
"expected=%d, got=%d" % (num_features,
feature_names.n_features))
# Make a shallow copy setting proper bias_name
return FeatureNames(
feature_names.feature_names,
n_features=num_features,
bias_name=bias_name,
unkn_template=feature_names.unkn_template)
else:
if len(feature_names) != num_features:
raise ValueError("feature_names has a wrong length: "
"expected=%d, got=%d" % (num_features,
len(feature_names)))
return FeatureNames(feature_names, bias_name=bias_name)
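# Typical outcome (hypothetical fitted estimator): for a linear model with
# coef_ of shape (1, 3), fit_intercept=True and no vectorizer, the names are
# synthesized as x0, x1, x2 plus the trailing '<BIAS>' feature.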
def get_feature_names_filtered(clf, vec=None, bias_name='<BIAS>',
feature_names=None, num_features=None,
feature_filter=None, feature_re=None,
estimator_feature_names=None):
# type: (...) -> Tuple[FeatureNames, List[int]]
feature_names = get_feature_names(
clf=clf,
vec=vec,
bias_name=bias_name,
feature_names=feature_names,
num_features=num_features,
estimator_feature_names=estimator_feature_names,
)
return feature_names.handle_filter(feature_filter, feature_re)
def get_default_target_names(estimator, num_targets=None):
"""
Return a vector of target names: "y" if there is only one target,
and "y0", "y1", ... if there are multiple targets.
"""
if num_targets is None:
if len(estimator.coef_.shape) <= 1:
num_targets = 1
else:
num_targets, _ = estimator.coef_.shape
if num_targets == 1:
target_names = ['y']
else:
target_names = ['y%d' % i for i in range(num_targets)]
return np.array(target_names)
def get_coef(clf, label_id, scale=None):
"""
Return a vector of coefficients for a given label,
including bias feature.
    ``scale`` (optional) is a scaling vector: coef[i] becomes
    coef[i] * scale[i] when scale[i] is not nan. The intercept is not scaled.
"""
if len(clf.coef_.shape) == 2:
# Most classifiers (even in binary case) and regressors
coef = _dense_1d(clf.coef_[label_id])
elif len(clf.coef_.shape) == 1:
# SGDRegressor stores coefficients in a 1D array
if label_id != 0:
raise ValueError(
'Unexpected label_id %s for 1D coefficient' % label_id)
coef = _dense_1d(clf.coef_)
elif len(clf.coef_.shape) == 0:
# Lasso with one feature: 0D array
coef = np.array([clf.coef_])
else:
raise ValueError('Unexpected clf.coef_ shape: %s' % clf.coef_.shape)
if scale is not None:
if coef.shape != scale.shape:
raise ValueError("scale shape is incorrect: expected %s, got %s" % (
coef.shape, scale.shape,
))
# print("shape is ok")
not_nan = ~np.isnan(scale)
coef = coef.copy()
coef[not_nan] *= scale[not_nan]
if not has_intercept(clf):
return coef
if label_id == 0 and not isinstance(clf.intercept_, np.ndarray):
bias = clf.intercept_
else:
bias = clf.intercept_[label_id]
return np.hstack([coef, bias])
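# Example (hypothetical fitted model): with coef_ == [[0.5, -1.0]] and
# intercept_ == [0.25], get_coef(clf, 0) returns array([0.5, -1.0, 0.25]),
# i.e. the bias is appended as the last entry.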
def _dense_1d(arr):
if not sp.issparse(arr):
return arr
return arr.toarray().reshape(-1)
def get_num_features(estimator):
""" Return size of a feature vector estimator expects as an input. """
if hasattr(estimator, 'coef_'): # linear models
if len(estimator.coef_.shape) == 0:
return 1
return estimator.coef_.shape[-1]
elif hasattr(estimator, 'feature_importances_'): # ensembles
return estimator.feature_importances_.shape[-1]
elif hasattr(estimator, 'feature_count_'): # naive bayes
return estimator.feature_count_.shape[-1]
elif hasattr(estimator, 'theta_'):
return estimator.theta_.shape[-1]
elif hasattr(estimator, 'estimators_') and len(estimator.estimators_):
# OvR
return get_num_features(estimator.estimators_[0])
else:
raise ValueError("Can't figure out feature vector size for %s" %
estimator)
try:
import pandas as pd
pandas_available = True
except ImportError:
pandas_available = False
def get_X(doc, vec=None, vectorized=False, to_dense=False):
if vec is None or vectorized:
if isinstance(doc, np.ndarray):
X = np.array([doc])
elif pandas_available and isinstance(doc, pd.Series):
# Convert to a DataFrame with a single row
X = doc.to_frame().transpose()
else:
X = doc
else:
X = vec.transform([doc])
if to_dense and sp.issparse(X):
X = X.toarray()
return X
def get_X0(X):
""" Return zero-th element of a one-element data container.
"""
if pandas_available and isinstance(X, pd.DataFrame):
assert len(X) == 1
x = np.array(X.iloc[0])
else:
x, = X
return x
def handle_vec(clf, doc, vec, vectorized, feature_names, num_features=None):
# type: (...) -> Tuple[Any, FeatureNames]
if not vectorized:
vec = invert_hashing_and_fit(vec, [doc])
if (vec is None and feature_names is None and
pandas_available and isinstance(doc, pd.Series)):
feature_names = list(doc.index)
# Explaining predictions does not need coef_scale
# because it is handled by the vectorizer.
feature_names = handle_hashing_vec(
vec, feature_names, coef_scale=None, with_coef_scale=False)
feature_names = get_feature_names(
clf, vec, feature_names=feature_names, num_features=num_features)
return vec, feature_names
def add_intercept(X):
""" Add intercept column to X """
intercept = np.ones((X.shape[0], 1))
if sp.issparse(X):
return sp.hstack([X, intercept]).tocsr()
else:
return np.hstack([X, intercept])
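# Example: a (2, 3) dense input becomes (2, 4) with a trailing column of
# ones; sparse input stays sparse (CSR).
#
#   >>> add_intercept(np.zeros((2, 3))).shape
#   (2, 4)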
def sklearn_version():
"""Return sklearn version object which can be used for comparison. Usage:
>>> sklearn_version() > '0.17'
True
"""
from sklearn import __version__
return LooseVersion(__version__)
|
|
import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianModel
import pgmpy.tests.help_functions as hf
from pgmpy.factors import TabularCPD
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianModel()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.g.edges()),
[['a', 'b'], ['b', 'c']])
def test_class_init_with_data_nonstring(self):
BayesianModel([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node('a')
self.assertListEqual(self.G.nodes(), ['a'])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(['a', 'b', 'c', 'd'])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge('d', 'e')
self.assertListEqual(sorted(self.G.nodes()), ['d', 'e'])
self.assertListEqual(self.G.edges(), [('d', 'e')])
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edge('a', 'b')
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['d', 'e']])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, 'a', 'a')
def test_add_edge_result_cycle(self):
self.G.add_edges_from([('a', 'b'), ('a', 'c')])
self.assertRaises(ValueError, self.G.add_edge, 'c', 'a')
def test_add_edges_from_string(self):
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['b', 'c']])
self.G.add_nodes_from(['d', 'e', 'f'])
self.G.add_edges_from([('d', 'e'), ('e', 'f')])
self.assertListEqual(sorted(self.G.nodes()),
['a', 'b', 'c', 'd', 'e', 'f'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([('a', 'b'), ('b', 'c'),
('d', 'e'), ('e', 'f')]))
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'a')])
def test_add_edges_from_result_cycle(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'b'), ('b', 'c'), ('c', 'a')])
def test_update_node_parents_bm_constructor(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.g.predecessors('a'), [])
self.assertListEqual(self.g.predecessors('b'), ['a'])
self.assertListEqual(self.g.predecessors('c'), ['b'])
def test_update_node_parents(self):
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.G.predecessors('a'), [])
self.assertListEqual(self.G.predecessors('b'), ['a'])
self.assertListEqual(self.G.predecessors('c'), ['b'])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('a', 'd'), ('b', 'd'),
('d', 'e'), ('b', 'c')])
def test_moral_graph(self):
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')] or
(edge[1], edge[0]) in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')])
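    # Moralization "marries" the parents of a common child (here 'a' and 'b',
    # both parents of 'd') and then drops all edge directions.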
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianModel([('a', 'd'), ('d', 'e'), ('b', 'd'), ('b', 'c'), ('a', 'b')])
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')] or
(edge[1], edge[0]) in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')])
def tearDown(self):
del self.G
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('d', 'g'), ('i', 'g'), ('g', 'l'),
('i', 's')])
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d')), ['d', 'g', 'l'])
self.assertEqual(sorted(self.G.active_trail_nodes('i')), ['g', 'i', 'l', 's'])
def test_active_trail_nodes_args(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d', observed='g')), ['d', 'i', 's'])
self.assertEqual(sorted(self.G.active_trail_nodes('l', observed='g')), ['l'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['i', 'l'])), ['s'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['d', 'l'])), ['g', 'i', 's'])
def test_is_active_trail_triplets(self):
self.assertTrue(self.G.is_active_trail('d', 'l'))
self.assertTrue(self.G.is_active_trail('g', 's'))
self.assertFalse(self.G.is_active_trail('d', 'i'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='g'))
self.assertFalse(self.G.is_active_trail('d', 'l', observed='g'))
self.assertFalse(self.G.is_active_trail('i', 'l', observed='g'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='l'))
self.assertFalse(self.G.is_active_trail('g', 's', observed='i'))
def test_is_active_trail(self):
self.assertFalse(self.G.is_active_trail('d', 's'))
self.assertTrue(self.G.is_active_trail('s', 'l'))
self.assertTrue(self.G.is_active_trail('d', 's', observed='g'))
self.assertFalse(self.G.is_active_trail('s', 'l', observed='g'))
def test_is_active_trail_args(self):
self.assertFalse(self.G.is_active_trail('s', 'l', 'i'))
self.assertFalse(self.G.is_active_trail('s', 'l', 'g'))
self.assertTrue(self.G.is_active_trail('d', 's', 'l'))
self.assertFalse(self.G.is_active_trail('d', 's', ['i', 'l']))
def test_get_cpds(self):
cpd_d = TabularCPD('d', 2, np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, np.random.rand(2, 4), ['d', 'i'], [2, 2])
cpd_l = TabularCPD('l', 2, np.random.rand(2, 2), ['g'], 2)
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d').variable, 'd')
def test_get_cpds1(self):
self.model = BayesianModel([('A', 'AB')])
cpd_a = TabularCPD('A', 2, np.random.rand(2, 1))
cpd_ab = TabularCPD('AB', 2, np.random.rand(2, 2), evidence=['A'],
evidence_card=[2])
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds('A').variable, 'A')
self.assertEqual(self.model.get_cpds('AB').variable, 'AB')
def test_add_single_cpd(self):
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
cpd_d = TabularCPD('d', 2, np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, np.random.rand(2, 4), ['d', 'i'], [2, 2])
cpd_l = TabularCPD('l', 2, np.random.rand(2, 2), ['g'], 2)
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d'), cpd_d)
self.assertEqual(self.G.get_cpds('i'), cpd_i)
self.assertEqual(self.G.get_cpds('g'), cpd_g)
self.assertEqual(self.G.get_cpds('l'), cpd_l)
self.assertEqual(self.G.get_cpds('s'), cpd_s)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianModel()
self.model_disconnected.add_nodes_from(['A', 'B', 'C', 'D', 'E'])
self.model_connected = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
def test_disconnected_fit(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
self.model_disconnected.fit(values)
for node in ['A', 'B', 'C', 'D', 'E']:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
            value = (values.loc[:, node].value_counts() /
                     values.loc[:, node].value_counts().sum()).values
np_test.assert_array_equal(cpd.values, value)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop('E', axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(e_predict.values.ravel(),
np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0,
0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,
1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,
1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 1, 0]))
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDirectedGraphCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianModel()
def test_add_single_cpd(self):
cpd = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff')
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff', 'grade')
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_cpd_for_node(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds('diff'), cpd1)
self.assertEqual(self.graph.get_cpds('intel'), cpd2)
self.assertEqual(self.graph.get_cpds('grade'), cpd3)
def test_get_cpd_raises_error(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, 'sat')
def tearDown(self):
del self.graph
|
|
'''Local.py - CGAT project specific functions
=============================================
The :mod:`Local` module contains various utility functions for working
on CGAT projects; they are very specific to the CGAT directory layout.
.. note::
    Methods in this module need to be made to work with arbitrary
    project layouts.
CGAT project layout
-------------------
The method :func:`isCGAT` checks if the code is executed within the
CGAT systems. The functions :func:`getProjectDirectories`,
:func:`getPipelineName`, :func:`getProjectId`, :func:`getProjectName`
provide information about the pipeline executed and the project context.
Publishing
-----------------
Once built, a report can be published by copying it to the publicly
visible directories on the CGAT systems. At the same time, references
to files on CGAT systems need to be replaced with links through the
public web interface. The functions :func:`getPublishDestinations` and
:func:`publish_report` implement this functionality.
The function :func:`publish_tracks` builds a UCSC track hub and
moves it into the appropriate CGAT download directories.
Reference
---------
'''
import os
import re
import shutil
import inspect
import collections
import brewer2mpl
from CGAT import Experiment as E
import CGAT.IOTools as IOTools
from CGATPipelines.Pipeline.Parameters import loadParameters
PROJECT_ROOT = '/ifs/projects'
# Variables PARAMS and CONFIG will be set by Pipeline.py
# on import.
PARAMS = None
CONFIG = None
def isCGAT(curdir=None):
'''return True if this is a CGAT project.
This method works by checking if the current working directory
is part of :var:`PROJECT_ROOT`.
'''
if curdir is None:
curdir = os.path.abspath(os.getcwd())
return curdir.startswith(PROJECT_ROOT)
def getProjectDirectories(sections=None):
'''return directories relevant to this project.
The entries of the dictionary are:
webdir
Directory for publishing information (without password access).
exportdir
Directory for storing files to be exported alongside
the report.
notebookdir
Directory where project notebooks are located.
Arguments
---------
sections : list
If given, only the named sections are returned.
Returns
-------
directories : dict
Raises
------
ValueError
If any of the directories does not exist
'''
if not isCGAT():
raise ValueError(
"getProjectDirectories called for a non-CGAT project")
project_name = getProjectName()
result = {
'webdir': os.path.join(
PROJECT_ROOT, PARAMS["web_dir"]),
'exportdir': os.path.join(
PARAMS["exportdir"]),
'notebookdir': os.path.join(
PROJECT_ROOT, project_name, "notebooks")
}
if sections:
result = dict([(x, y) for x, y in list(result.items())
if x in sections])
for x, y in list(result.items()):
if not os.path.exists(y):
raise ValueError(
"directory %s for %s does not exist" % (y, x))
return result
def getPipelineName():
'''return the name of the pipeline.
The name of the pipeline is deduced by the name of the top-level
python script. The pipeline name is the name of the script
without any path information and the ``.py`` suffix.
Returns
-------
string
'''
# use the file attribute of the caller
for x in inspect.stack():
if x[0].f_globals["__name__"] == "__main__":
return os.path.basename(x[0].f_globals['__file__'])[:-3]
def getProjectId():
'''get the (obfuscated) project id based on the current working
directory.
The project is located by finding the ``web_dir`` configuration
    variable and working backwards from that. ``web_dir`` should be a
    link to the web directory in the project directory, which then
links to the web directory in the sftp directory which then links
to the obfuscated directory::
pipeline:web_dir
-> /ifs/projects/.../web
-> /ifs/sftp/.../web
-> /ifs/sftp/.../aoeuCATAa (obfuscated directory)
Returns
    -------
string
'''
    # return an id that has been explicitly set
if "report_project_url" in PARAMS:
return PARAMS["report_project_url"]
curdir = os.path.abspath(os.getcwd())
if not isCGAT(curdir):
raise ValueError(
"method getProjectId not called within %s" % PROJECT_ROOT)
webdir = PARAMS['web_dir']
if not os.path.islink(webdir):
raise ValueError(
"unknown configuration: webdir '%s' is not a link" % webdir)
target = os.readlink(webdir)
if not os.path.islink(target):
raise ValueError(
"unknown configuration: target '%s' is not a link" % target)
return os.path.basename(os.readlink(target))
def getProjectName():
'''cgat specific method: get the name of the project
based on the current working directory.
If called outside the Project hierarchy, the project name
will be set to the name of the current directory.
'''
curdir = os.path.abspath(os.getcwd())
if isCGAT(curdir):
prefixes = len(PROJECT_ROOT.split("/"))
return curdir.split("/")[prefixes]
else:
return os.path.basename(curdir)
def getPublishDestinations(prefix="", suffix=None):
"""cgat specific method : return path names of directories
for publishing.
Arguments
---------
prefix : string
Prefix to add to output directories.
    suffix : string
        Suffix to add to output directories.
Returns
-------
dest_report : string
Path for report to export
dest_export : string
Path for files to export
"""
if not prefix:
prefix = PARAMS.get("report_prefix", "default")
if prefix == "default":
prefix = getPipelineName() + "_"
if not suffix:
suffix = PARAMS.get("report_suffix", "")
dest_report = prefix + "report"
dest_export = prefix + "export"
if suffix is not None:
dest_report += suffix
dest_export += suffix
return dest_report, dest_export
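# Example (hypothetical PARAMS): with report_prefix set to "default" and a
# top-level script named pipeline_mapping.py, getPublishDestinations()
# returns ('pipeline_mapping_report', 'pipeline_mapping_export').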
def publish_report(prefix="",
patterns=[],
project_id=None,
prefix_project="/ifs/projects",
export_files=None,
suffix=None,
subdirs=False,
):
'''publish report into web directory.
Links export directory into web directory.
Copies html pages and fudges links to the pages in the
export directory.
If *prefix* is given, the directories will start with prefix,
otherwise, it is looked up from the option ``report_prefix``.
If report_prefix is "default", the prefix will be derived
    from the pipeline name. For example, pipeline_intervals will
    be copied to ``pipeline_intervals_report``.
    *patterns* is an optional list of two-element tuples (<pattern>,
    replacement_string). Each substitution will be applied to each
    file ending in .html.
If *project_id* is not given, it will be looked up. This requires
that this method is called within a subdirectory of PROJECT_ROOT.
*export_files* is a dictionary of files to be exported. The key
    of the dictionary denotes the target directory within the web
    directory. The values in the dictionary are the files to be
    linked to in the directory. For example::
exportfiles = {
"bamfiles" : glob.glob( "*/*.bam" ) + glob.glob( "*/*.bam.bai" ),
"bigwigfiles" : glob.glob( "*/*.bw" ),
}
.. note::
This function is CGAT specific.
'''
dest_report, dest_export = getPublishDestinations(prefix, suffix)
web_dir = PARAMS["web_dir"]
if project_id is None:
project_id = getProjectId()
src_export = os.path.abspath("export")
curdir = os.path.abspath(os.getcwd())
# substitute links to export and report
base_url = "http://www.cgat.org/downloads/%s" % project_id
_patterns = [
# redirect export directory
(re.compile(src_export),
"%(base_url)s/%(dest_export)s" % locals()),
# redirect report directory
(re.compile(curdir),
"%(base_url)s/%(dest_report)s" % locals()),
(re.compile('(%s)/_static' %
curdir),
"%(base_url)s/%(dest_report)s/_static" % locals())]
_patterns.extend(patterns)
# add intersphinx mapping - this requires that the name
    # for the intersphinx redirection (key) corresponds to the
# export location with an appended "_report".
if CONFIG.has_section("intersphinx"):
for key, value in CONFIG.items("intersphinx"):
_patterns.append((
re.compile(os.path.abspath(value)),
"%(base_url)s/%(key)s_report" % locals()))
# check if the target exists in download location
intersphinx_target = os.path.join(
web_dir, key + "_report", "objects.inv")
if not os.path.exists(intersphinx_target):
E.warn("intersphinx mapping for '%s' does not exist at %s" %
(key, intersphinx_target))
def _link(src, dest):
'''create links.
Only link to existing targets.
'''
if os.path.exists(dest):
os.remove(dest)
if not os.path.exists(src):
E.warn("%s does not exist - skipped" % src)
return
# IMS: check if base path of dest exists. This allows for
# prefix to be a nested path structure e.g. project_id/
if not os.path.exists(os.path.dirname(os.path.abspath(dest))):
E.info('creating directory %s' %
os.path.dirname(os.path.abspath(dest)))
os.mkdir(os.path.dirname(os.path.abspath(dest)))
os.symlink(os.path.abspath(src), dest)
def _copy(src, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
if not os.path.exists(src):
E.warn("%s does not exist - skipped" % src)
return
shutil.copytree(os.path.abspath(src), dest)
# publish export dir via symlinking
E.info("linking export directory in %s" % dest_export)
_link(src_export,
os.path.abspath(os.path.join(web_dir, dest_export)))
# publish web pages by copying
E.info("publishing web pages in %s" %
os.path.abspath(os.path.join(web_dir, dest_report)))
_copy(os.path.abspath("report/html"),
os.path.abspath(os.path.join(web_dir, dest_report)))
for root, dirs, files in os.walk(os.path.join(web_dir, dest_report)):
for f in files:
fn = os.path.join(root, f)
if fn.endswith(".html"):
with open(fn) as inf:
data = inf.read()
for rx, repl in _patterns:
data = rx.sub(repl, data)
outf = open(fn, "w")
outf.write(data)
outf.close()
if export_files:
bigwigs, bams, beds = [], [], []
for targetdir, filenames in list(export_files.items()):
targetdir = os.path.join(web_dir, targetdir)
if not os.path.exists(targetdir):
os.makedirs(targetdir)
for src in filenames:
dest = os.path.join(targetdir, os.path.basename(src))
if dest.endswith(".bam"):
bams.append((targetdir, dest))
elif dest.endswith(".bw"):
bigwigs.append((targetdir, dest))
elif dest.endswith(".bed.gz"):
beds.append((targetdir, dest))
dest = os.path.abspath(dest)
if not os.path.exists(dest):
try:
os.symlink(os.path.abspath(src), dest)
except OSError as msg:
E.warn("could not create symlink from %s to %s: %s" %
(os.path.abspath(src), dest, msg))
# output ucsc links
with open("urls.txt", "w") as outfile:
for targetdir, fn in bams:
filename = os.path.basename(fn)
track = filename[:-len(".bam")]
outfile.write(
"""track type=bam name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
for targetdir, fn in bigwigs:
filename = os.path.basename(fn)
track = filename[:-len(".bw")]
outfile.write(
"""track type=bigWig name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
for targetdir, fn in beds:
filename = os.path.basename(fn)
track = filename[:-len(".bed.gz")]
outfile.write(
"""http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
E.info("UCSC urls are in urls.txt")
E.info(
"report has been published at http://www.cgat.org/downloads/%(project_id)s/%(dest_report)s" % locals())
def publish_tracks(export_files,
prefix="",
project_id=None,
project_name=None,
UCSC_ini=None):
'''publish a UCSC Track Hub.
This method takes a dictionary of file types associated
with files. For each file, a link will be created in
the upload directory. The track will be stored under
a project name, which will be derived from the location
of the working directory.
Information about the genome, the upload directory, etc. will be
taken from the global configuration dictionary.
For example, calling the following code in a pipeline executed
in .../proj013/mapping::
export_files = {
"bamfiles": glob.glob("*/*.bam") + glob.glob("*/*.bam.bai"),
"bigwigfiles": glob.glob("*/*.bw"),
}
publish_tracks(export_files)
will create a hub file at
:file:`<uploaddir>/OBFUSID/mapping/ucsc.hub`, where
OBFUSID is the obfuscated directory entry in the CGAT
download directory for a particular project.
If you want to create group tracks and get them to inherit from a
    parent, you can supply a filename for a UCSC ini file. The ini
file defines two types of parameters, parents and set_features.
Parents define containers with a regex to identify the child
tracks. Set_features add additional features to all tracks
matching a regex. Parent and set_feature parameters are identified
by their respective "parent" or "set_features" prefixes.
For example, the following UCSC ini "test.ini" will create a
parent multiWig track called "Test" with the UCSC options as
    defined in the values parameter. The values param must be a comma
    separated list of key:value pairs, where key and value are separated
    by a single space. The regex param for parent_test defines the child tracks
which will be contained within "Test". The optional colour param
defines the colours for the child tracks. Colours are defined
using the brewer2mpl python module. Colour parameters must contain
the name of the pallete followed by the type of pallette.
The ini file below also defines a "set_features" parameter,
"bigwigs". Set_feature require a value and regex parameter. In
this case, the UCSC options in the values parameter will be added
to all tracks matching the ".*bigwig$" regex. As above, the values
param must be a comma separated list of key:value pairs which are
seperated by a single space. As above, an optional colours
parameter can also be given.
    Note: colour palettes have a maximum number of allowable colours.
    To see the available palettes and their size, run:
>import brewer2mpl
>brewer2mpl.print_maps()
>cat test.ini
#######################
#######################
[parent_test]
values=container multiWig,bigDataUrl Test,shortLabel Test,longLabel Test,type bigWig,viewLimits 0:160,visibility full,aggregate transparentOverlay,showSubtrackColorOnUi on,windowingFunction maximum,priority 1.2,configurable on,autoScale on,dragAndDrop subtracks
regex=.*-Saline-.*bw$
colour=Blues,Sequential
#######################
[set_features_bigwigs]
values=configurable on,autoScale on,useScore on,visibility full
regex=.*bigwig$
colour=Oranges,Sequential
#######################
#######################
Arguments
---------
export_files : dict
Dictionary mapping filetypes to files.
prefix : string
will be added to each track.
project_id : string
The project identifier. If not given, it will be taken from
the path of the project directory.
project_name : string
The project name, typically the project number. If not given,
it will be taken from the current directory.
'''
# the import is located here to avoid cyclical dependencies
# between Local.py, Pipeline.py and PipelineUCSC.py
import CGATPipelines.PipelineUCSC as PipelineUCSC
if not prefix:
prefix = PARAMS.get("report_prefix", "")
if not UCSC_ini:
UCSC_ini = PARAMS.get("ucsc_ini", None)
web_dir = PARAMS["web_dir"]
if project_id is None:
project_id = getProjectId()
if project_name is None:
project_name = getProjectName()
src_export = os.path.abspath("export")
dest_report = prefix + "report"
dest_export = prefix + "export"
hubdir = os.path.join(PARAMS["web_dir"], "ucsc")
if not os.path.exists(hubdir):
E.info("creating %s" % hubdir)
os.mkdir(hubdir)
# write the UCSC hub file
hubfile = os.path.join(hubdir, "hub.txt")
genomesfile = os.path.join(hubdir, "genomes.txt")
trackdir = os.path.join(hubdir, PARAMS["genome"])
trackfile = os.path.join(hubdir, PARAMS["genome"], "trackDb.txt")
trackrelpath = os.path.join(PARAMS["genome"], "trackDb.txt")
if os.path.exists(hubfile):
with IOTools.openFile(hubfile) as infile:
hubdata = PipelineUCSC.readUCSCFile(infile)
else:
hubdata = [('hub', "CGAT-" + project_name),
('shortLabel', "CGAT-" + project_name),
('longLabel', "Data for CGAT project %s" % project_name),
('genomesFile', "genomes.txt"),
('email', 'andreas.heger@gmail.com')]
E.info("writing to %s" % hubfile)
with IOTools.openFile(hubfile, "w") as outfile:
PipelineUCSC.writeUCSCFile(outfile, hubdata)
# create the genomes.txt file - append to it if necessary.
if os.path.exists(genomesfile):
with IOTools.openFile(genomesfile) as infile:
genomes = PipelineUCSC.readUCSCFile(infile)
else:
genomes = []
if ("genome", PARAMS["genome"]) not in genomes:
genomes.append(("genome", PARAMS["genome"]))
genomes.append(("trackDb", trackrelpath))
E.info("writing to %s" % genomesfile)
with IOTools.openFile(genomesfile, "w") as outfile:
PipelineUCSC.writeUCSCFile(outfile, genomes)
# create the track data
if not os.path.exists(trackdir):
os.mkdir(trackdir)
if os.path.exists(trackfile):
E.debug('reading existing tracks from %s' % trackfile)
with IOTools.openFile(trackfile) as infile:
tracks = PipelineUCSC.readTrackFile(infile)
else:
tracks = []
tracks = collections.OrderedDict(tracks)
def getName(name):
if name.endswith(".bam"):
return "bam", name
elif name.endswith(".bw") or name.endswith(".bigwig"):
return "bigWig", name
elif name.endswith(".bb") or name.endswith(".bigbed"):
return "bigBed", name
else:
return None, None
for targetdir, filenames in list(export_files.items()):
for src in filenames:
dest = os.path.join(trackdir, prefix + os.path.basename(src))
dest = os.path.abspath(dest)
# create a symlink
if not os.path.exists(dest):
try:
os.symlink(os.path.abspath(src), dest)
except OSError as msg:
E.warn("could not create symlink from %s to %s: %s" %
(os.path.abspath(src), dest, msg))
ucsctype, trackname = getName(os.path.basename(dest))
# ignore invalid types and other files (.bai files, ...)
if ucsctype is None:
continue
tracks[trackname] = (("bigDataUrl", os.path.basename(dest)),
("shortLabel", trackname),
("longLabel", trackname),
("type", ucsctype))
if UCSC_ini:
UCSC_PARAMS = loadParameters(UCSC_ini)
for param, values in UCSC_PARAMS.items():
children = []
# find "parent" params
if re.match("parent_.*_values", param):
make_group = True
name = param.replace("_values", "")
regex = UCSC_PARAMS[name + "_regex"]
for targetdir, filenames in list(export_files.items()):
for src in filenames:
dest = prefix + os.path.basename(src)
if re.match(regex, dest):
children.append(dest)
# find "set features" params
elif re.match("set_features_.*_regex", param):
make_group = False
regex = UCSC_PARAMS[param]
name = param.replace("_regex", "")
for targetdir, filenames in list(export_files.items()):
for src in filenames:
dest = prefix + os.path.basename(src)
if re.match(regex, dest):
children.append(dest)
else:
continue
if name + "_colour" in list(UCSC_PARAMS.keys()):
colour, colour_type = UCSC_PARAMS[name + "_colour"].split(",")
                try:
                    colours = brewer2mpl.get_map(
                        colour, colour_type, max(3, len(children)))
                    colours = colours.colors
                    # make the colours a shade darker
                    colours = [[max(0, y - 25) for y in x] for x in colours]
                except ValueError as error:
                    # do not crash on an unknown palette: warn and skip colours
                    print("Could not set colours for %s. See error message: "
                          "%s" % (",".join(children), error))
                    colours = None
else:
colours = None
for n, child in enumerate(children):
if make_group:
# make a parent and a copy of the child so we have
# two tracks, one grouped, one by itself
values = UCSC_PARAMS[param]
tracks[name] = [x.split(" ") for x in values.split(",")]
group_trackname = child + "_grouped"
tracks[group_trackname] = tracks[child]
tracks[group_trackname] += (("parent", name),)
else:
# just add the values to the child
values = UCSC_PARAMS[name + "_values"]
tracks[child] += tuple([x.split(" ") for x in values.split(",")])
if colours:
rgb = ",".join(map(str, colours[n]))
tracks[child] += (("color", rgb),)
if make_group:
tracks[group_trackname] += (("color", rgb),)
E.info("writing to %s" % trackfile)
with IOTools.openFile(trackfile, "w") as outfile:
PipelineUCSC.writeTrackFile(outfile, list(tracks.items()))
E.info(
"data hub has been created at http://www.cgat.org/downloads/%(project_id)s/ucsc/hub.txt" % locals())
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
import os
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Type
from pants.base.build_environment import get_default_pants_config_file
from pants.option.config import Config
from pants.option.custom_types import ListValueComponent
from pants.option.global_options import GlobalOptions
from pants.option.optionable import Optionable
from pants.option.options import Options
from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo
from pants.util.dirutil import read_file
from pants.util.memo import memoized_method, memoized_property
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import ensure_text
logger = logging.getLogger(__name__)
# This is a temporary hack that allows us to note the fact that we're in v2-exclusive mode
# in a static location, as soon as we know it. This way code that cannot access options
# can still use this information to customize behavior. Again, this is a temporary hack
# to provide a better v2 experience to users who are not (and possibly never have been)
# running v1, and should go away ASAP.
class IsV2Exclusive:
def __init__(self):
self._value = False
def set(self):
self._value = True
def __bool__(self):
return self._value
is_v2_exclusive = IsV2Exclusive()
@dataclass(frozen=True)
class OptionsBootstrapper:
"""Holds the result of the first stage of options parsing, and assists with parsing full
options."""
env_tuples: Tuple[Tuple[str, str], ...]
bootstrap_args: Tuple[str, ...]
args: Tuple[str, ...]
config: Config
@staticmethod
def get_config_file_paths(env, args) -> List[str]:
"""Get the location of the config files.
The locations are specified by the --pants-config-files option. However we need to load the
config in order to process the options. This method special-cases --pants-config-files
in order to solve this chicken-and-egg problem.
Note that, obviously, it's not possible to set the location of config files in a config file.
Doing so will have no effect.
"""
# This exactly mirrors the logic applied in Option to all regular options. Note that we'll
# also parse --pants-config as a regular option later, but there's no harm in that. In fact,
# it's preferable, so that any code that happens to want to know where we read config from
# can inspect the option.
flag = "--pants-config-files="
evars = [
"PANTS_GLOBAL_PANTS_CONFIG_FILES",
"PANTS_PANTS_CONFIG_FILES",
"PANTS_CONFIG_FILES",
]
path_list_values = []
default = get_default_pants_config_file()
if Path(default).is_file():
path_list_values.append(ListValueComponent.create(default))
for var in evars:
if var in env:
path_list_values.append(ListValueComponent.create(env[var]))
break
for arg in args:
# Technically this is very slightly incorrect, as we don't check scope. But it's
# very unlikely that any task or subsystem will have an option named --pants-config-files.
# TODO: Enforce a ban on options with a --pants- prefix outside our global options?
if arg.startswith(flag):
path_list_values.append(ListValueComponent.create(arg[len(flag) :]))
return ListValueComponent.merge(path_list_values).val
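    # Illustrative resolution (hypothetical values): with
    # PANTS_CONFIG_FILES=/etc/pants.toml in the environment and
    # --pants-config-files=ci.toml on the command line, the merged result is
    # [<default config>, '/etc/pants.toml', 'ci.toml'].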
@staticmethod
def parse_bootstrap_options(
env: Mapping[str, str], args: Sequence[str], config: Config
) -> Options:
bootstrap_options = Options.create(
env=env, config=config, known_scope_infos=[GlobalOptions.get_scope_info()], args=args,
)
def register_global(*args, **kwargs):
## Only use of Options.register?
bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs)
GlobalOptions.register_bootstrap_options(register_global)
opts = bootstrap_options.for_global_scope()
if opts.v2 and not opts.v1 and opts.backend_packages == []:
is_v2_exclusive.set()
return bootstrap_options
@classmethod
def create(
cls, env: Optional[Mapping[str, str]] = None, args: Optional[Sequence[str]] = None,
) -> "OptionsBootstrapper":
"""Parses the minimum amount of configuration necessary to create an OptionsBootstrapper.
:param env: An environment dictionary, or None to use `os.environ`.
:param args: An args array, or None to use `sys.argv`.
"""
env = {
k: v for k, v in (os.environ if env is None else env).items() if k.startswith("PANTS_")
}
args = tuple(sys.argv if args is None else args)
flags = set()
short_flags = set()
# We can't use pants.engine.fs.FileContent here because it would cause a circular dep.
@dataclass(frozen=True)
class FileContent:
path: str
content: bytes
def filecontent_for(path: str) -> FileContent:
return FileContent(ensure_text(path), read_file(path, binary_mode=True),)
def capture_the_flags(*args: str, **kwargs) -> None:
for arg in args:
flags.add(arg)
if len(arg) == 2:
short_flags.add(arg)
elif kwargs.get("type") == bool:
flags.add(f"--no-{arg[2:]}")
GlobalOptions.register_bootstrap_options(capture_the_flags)
def is_bootstrap_option(arg: str) -> bool:
components = arg.split("=", 1)
if components[0] in flags:
return True
for flag in short_flags:
if arg.startswith(flag):
return True
return False
# Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
# Stop before '--' since args after that are pass-through and may have duplicate names to our
# bootstrap options.
bargs = tuple(
filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != "--", args))
)
config_file_paths = cls.get_config_file_paths(env=env, args=args)
config_files_products = [filecontent_for(p) for p in config_file_paths]
pre_bootstrap_config = Config.load_file_contents(config_files_products)
initial_bootstrap_options = cls.parse_bootstrap_options(env, bargs, pre_bootstrap_config)
bootstrap_option_values = initial_bootstrap_options.for_global_scope()
# Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
# from (typically pants.toml), then config override, then rcfiles.
full_config_paths = pre_bootstrap_config.sources()
if bootstrap_option_values.pantsrc:
rcfiles = [
os.path.expanduser(str(rcfile)) for rcfile in bootstrap_option_values.pantsrc_files
]
existing_rcfiles = list(filter(os.path.exists, rcfiles))
full_config_paths.extend(existing_rcfiles)
full_config_files_products = [filecontent_for(p) for p in full_config_paths]
post_bootstrap_config = Config.load_file_contents(
full_config_files_products, seed_values=bootstrap_option_values.as_dict(),
)
env_tuples = tuple(sorted(env.items(), key=lambda x: x[0]))
return cls(
env_tuples=env_tuples, bootstrap_args=bargs, args=args, config=post_bootstrap_config
)
@memoized_property
def env(self) -> Dict[str, str]:
return dict(self.env_tuples)
@memoized_property
def bootstrap_options(self) -> Options:
"""The post-bootstrap options, computed from the env, args, and fully discovered Config.
Re-computing options after Config has been fully expanded allows us to pick up bootstrap values
(such as backends) from a config override file, for example.
Because this can be computed from the in-memory representation of these values, it is not part
of the object's identity.
"""
return self.parse_bootstrap_options(self.env, self.bootstrap_args, self.config)
def get_bootstrap_options(self) -> Options:
"""Returns an Options instance that only knows about the bootstrap options."""
return self.bootstrap_options
@memoized_method
def _full_options(self, known_scope_infos: FrozenOrderedSet[ScopeInfo]) -> Options:
bootstrap_option_values = self.get_bootstrap_options().for_global_scope()
options = Options.create(
self.env,
self.config,
known_scope_infos,
args=self.args,
bootstrap_option_values=bootstrap_option_values,
)
distinct_optionable_classes: Set[Type[Optionable]] = set()
for ksi in known_scope_infos:
if not ksi.optionable_cls or ksi.optionable_cls in distinct_optionable_classes:
continue
distinct_optionable_classes.add(ksi.optionable_cls)
ksi.optionable_cls.register_options_on_scope(options)
return options
def get_full_options(self, known_scope_infos: Iterable[ScopeInfo]) -> Options:
"""Get the full Options instance bootstrapped by this object for the given known scopes.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
        :returns: A bootstrapped Options instance that also carries options for all the supplied known
scopes.
"""
return self._full_options(
FrozenOrderedSet(sorted(known_scope_infos, key=lambda si: si.scope))
)
def verify_configs_against_options(self, options: Options) -> None:
"""Verify all loaded configs have correct scopes and options.
:param options: Fully bootstrapped valid options.
"""
error_log = []
for config in self.config.configs():
for section in config.sections():
scope = GLOBAL_SCOPE if section == GLOBAL_SCOPE_CONFIG_SECTION else section
try:
valid_options_under_scope = set(
options.for_scope(scope, include_passive_options=True)
)
# Only catch ConfigValidationError. Other exceptions will be raised directly.
except Config.ConfigValidationError:
error_log.append(f"Invalid scope [{section}] in {config.config_path}")
else:
# All the options specified under [`section`] in `config` excluding bootstrap defaults.
all_options_under_scope = set(config.values.options(section)) - set(
config.values.defaults
)
for option in sorted(all_options_under_scope):
if option not in valid_options_under_scope:
error_log.append(
f"Invalid option '{option}' under [{section}] in {config.config_path}"
)
if error_log:
for error in error_log:
logger.error(error)
raise Config.ConfigValidationError(
"Invalid config entries detected. See log for details on which entries to update or "
"remove.\n(Specify --no-verify-config to disable this check.)"
)
|
|
#!/usr/bin/env python
from argparse import ArgumentParser
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django import contrib
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TransactionTestCase, TestCase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils._os import upath
from django.utils import six
warnings.simplefilter("default", RemovedInDjango19Warning)
warnings.simplefilter("default", RemovedInDjango20Warning)
CONTRIB_MODULE_PATH = 'django.contrib'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
SUBDIRS_TO_SKIP = [
'data',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
def get_test_modules():
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
(CONTRIB_MODULE_PATH, CONTRIB_DIR)
]
if HAS_SPATIAL_DB:
discovery_paths.append(
('django.contrib.gis.tests', os.path.join(CONTRIB_DIR, 'gis', 'tests'))
)
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
f.startswith('sql') or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
            if connection.vendor != 'postgresql' and f == 'postgres_tests':
continue
modules.append((modpath, f))
return modules
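# Each returned entry is a (modpath, name) pair, e.g. (None, 'admin_views')
# for a top-level test app or ('django.contrib', 'auth') for a contrib app
# (illustrative names, not an exhaustive list).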
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
# Ensure the middleware classes are seen as overridden otherwise we get a compatibility warning.
settings._explicit_settings.add('MIDDLEWARE_CLASSES')
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'django.contrib.contenttypes.tests.migrations',
}
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')
if bits[:2] == ['django', 'contrib']:
bits = bits[:3]
else:
bits = bits[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TEMP_DIR))
except OSError:
print('Failed to remove temp directory: %s' % TEMP_DIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
)
# Catch warnings thrown in test DB setup -- remove in Django 1.9
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"Custom SQL location '<app_label>/models/sql' is deprecated, "
"use '<app_label>/sql' instead.",
RemovedInDjango19Warning
)
failures = test_runner.run_tests(
test_labels or get_installed(), extra_tests=extra_tests)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
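        # Both halves keep the bisection label, so whichever half contains
        # the test interfering with it still reproduces the failure.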
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
options = parser.parse_args()
# Allow including a trailing slash on app_labels for tab completion convenience
    options.modules = [os.path.normpath(label) for label in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.modules)
if failures:
            sys.exit(1)
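# Typical invocations (illustrative labels, not an exhaustive list):
#   ./runtests.py basic i18n -v 2
#   ./runtests.py --settings=test_sqlite --bisect=queries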
from __future__ import unicode_literals
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.contrib.postgres.search import SearchVectorField
from django.contrib.postgres.indexes import GinIndex
from opencivicdata.core.models.base import (
OCDBase,
LinkBase,
OCDIDField,
RelatedBase,
RelatedEntityBase,
MimetypeLinkBase,
IdentifierBase,
)
from opencivicdata.core.models import Organization
from .session import LegislativeSession
from ... import common
class Bill(OCDBase):
id = OCDIDField(ocd_type="bill")
legislative_session = models.ForeignKey(
LegislativeSession,
related_name="bills",
# sessions should be hard to delete
on_delete=models.PROTECT,
)
identifier = models.CharField(max_length=100)
title = models.TextField()
from_organization = models.ForeignKey(
Organization,
related_name="bills",
null=True,
# chambers should be hard to delete
on_delete=models.PROTECT,
)
# check that array values are in enum?
classification = ArrayField(base_field=models.TextField(), blank=True, default=list)
subject = ArrayField(base_field=models.TextField(), blank=True, default=list)
def __str__(self):
return "{} in {}".format(self.identifier, self.legislative_session)
class Meta:
db_table = "opencivicdata_bill"
index_together = [["from_organization", "legislative_session", "identifier"]]
class BillAbstract(RelatedBase):
bill = models.ForeignKey(Bill, related_name="abstracts", on_delete=models.CASCADE)
abstract = models.TextField()
note = models.TextField(blank=True)
    # max_length is not enforced on TextField; use CharField like the
    # other date fields.
    date = models.CharField(max_length=10, blank=True)  # YYYY[-MM[-DD]]
def __str__(self):
return "{0} abstract".format(self.bill.identifier)
class Meta:
db_table = "opencivicdata_billabstract"
class BillTitle(RelatedBase):
bill = models.ForeignKey(
Bill, related_name="other_titles", on_delete=models.CASCADE
)
title = models.TextField()
note = models.TextField(blank=True)
def __str__(self):
return "{0} ({1})".format(self.title, self.bill.identifier)
class Meta:
db_table = "opencivicdata_billtitle"
class BillIdentifier(IdentifierBase):
bill = models.ForeignKey(
Bill, related_name="other_identifiers", on_delete=models.CASCADE
)
note = models.TextField(blank=True)
class Meta:
db_table = "opencivicdata_billidentifier"
class BillAction(RelatedBase):
bill = models.ForeignKey(Bill, related_name="actions", on_delete=models.CASCADE)
organization = models.ForeignKey(
Organization,
related_name="actions",
# don't let an org delete wipe out a bunch of bill actions
on_delete=models.PROTECT,
)
description = models.TextField()
date = models.CharField(max_length=25) # YYYY-MM-DD HH:MM:SS+HH:MM
classification = ArrayField(
base_field=models.TextField(), blank=True, default=list
) # enum
order = models.PositiveIntegerField()
extras = JSONField(default=dict, blank=True)
class Meta:
db_table = "opencivicdata_billaction"
ordering = ["order"]
def __str__(self):
return "{0} action on {1}".format(self.bill.identifier, self.date)
class BillActionRelatedEntity(RelatedEntityBase):
action = models.ForeignKey(
BillAction, related_name="related_entities", on_delete=models.CASCADE
)
def __str__(self):
return "{0} related to {1}".format(self.entity_name, self.action)
class Meta:
db_table = "opencivicdata_billactionrelatedentity"
class RelatedBill(RelatedBase):
bill = models.ForeignKey(
Bill, related_name="related_bills", on_delete=models.CASCADE
)
related_bill = models.ForeignKey(
Bill,
related_name="related_bills_reverse",
null=True,
# if related bill goes away, just unlink the relationship
on_delete=models.SET_NULL,
)
identifier = models.CharField(max_length=100)
# not a FK in case we don't know the session yet
legislative_session = models.CharField(max_length=100)
relation_type = models.CharField(
max_length=100, choices=common.BILL_RELATION_TYPE_CHOICES
)
def __str__(self):
return "relationship of {} to {} ({})".format(
self.bill, self.related_bill, self.relation_type
)
class Meta:
db_table = "opencivicdata_relatedbill"
class BillSponsorship(RelatedEntityBase):
bill = models.ForeignKey(
Bill, related_name="sponsorships", on_delete=models.CASCADE
)
primary = models.BooleanField(default=False)
classification = models.CharField(max_length=100) # enum?
def __str__(self):
return "{} ({}) sponsorship of {}".format(
self.name, self.entity_type, self.bill
)
class Meta:
db_table = "opencivicdata_billsponsorship"
class BillDocument(RelatedBase):
bill = models.ForeignKey(Bill, related_name="documents", on_delete=models.CASCADE)
note = models.CharField(max_length=300)
date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
extras = JSONField(default=dict, blank=True)
def __str__(self):
return "{0} document of {1}".format(self.date, self.bill)
class Meta:
db_table = "opencivicdata_billdocument"
class BillVersion(RelatedBase):
bill = models.ForeignKey(Bill, related_name="versions", on_delete=models.CASCADE)
note = models.CharField(max_length=300)
date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
extras = JSONField(default=dict, blank=True)
def __str__(self):
return "{0} version of {1}".format(self.date, self.bill)
class Meta:
db_table = "opencivicdata_billversion"
class BillDocumentLink(MimetypeLinkBase):
document = models.ForeignKey(
BillDocument, related_name="links", on_delete=models.CASCADE
)
def __str__(self):
return "{0} for {1}".format(self.url, self.document.bill)
class Meta:
db_table = "opencivicdata_billdocumentlink"
class BillVersionLink(MimetypeLinkBase):
version = models.ForeignKey(
BillVersion, related_name="links", on_delete=models.CASCADE
)
def __str__(self):
return "{0} for {1}".format(self.url, self.version)
class Meta:
db_table = "opencivicdata_billversionlink"
class BillSource(LinkBase):
bill = models.ForeignKey(Bill, related_name="sources", on_delete=models.CASCADE)
class Meta:
db_table = "opencivicdata_billsource"
class SearchableBill(models.Model):
"""
This model associates a single version's text with a given bill.
This is done for a few reasons:
* bills with multiple versions aren't weighted more heavily than others
* this makes querying quite a bit more efficient (no need to deduplicate results)
We'll also store error results, assuming that they're somewhat persistent.
"""
bill = models.OneToOneField(
Bill, related_name="searchable", null=True, on_delete=models.CASCADE
)
version_link = models.OneToOneField(
BillVersionLink, related_name="searchable", null=True, on_delete=models.CASCADE
)
search_vector = SearchVectorField(default=None)
all_titles = models.TextField(default="")
raw_text = models.TextField(default="")
is_error = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "opencivicdata_searchablebill"
indexes = [GinIndex(name="search_index", fields=["search_vector"])]
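# Illustrative full-text query (the search term is made up):
#
#   from django.db.models import F
#   from django.contrib.postgres.search import SearchQuery, SearchRank
#
#   query = SearchQuery("transportation")
#   results = (SearchableBill.objects
#              .filter(search_vector=query)
#              .annotate(rank=SearchRank(F("search_vector"), query))
#              .order_by("-rank"))
#
# Because each bill has at most one SearchableBill row (the OneToOneFields
# above), ranking needs no deduplication and multi-version bills are not
# over-weighted.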
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import migrate_server
from nova.api.openstack.compute import servers
from nova.compute import api as compute
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.network import model
from nova.network import neutron
from nova import objects
from nova.objects import fields
from nova.objects.instance_group import InstanceGroup
from nova.policies import base as base_policy
from nova.policies import extended_server_attributes as ea_policies
from nova.policies import servers as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
CONF = nova.conf.CONF
class ServersPolicyTest(base.BasePolicyTest):
"""Test Servers APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ServersPolicyTest, self).setUp()
self.controller = servers.ServersController()
self.m_controller = migrate_server.MigrateServerController()
self.rule_trusted_certs = policies.SERVERS % 'create:trusted_certs'
self.rule_attach_network = policies.SERVERS % 'create:attach_network'
self.rule_attach_volume = policies.SERVERS % 'create:attach_volume'
self.rule_requested_destination = policies.REQUESTED_DESTINATION
self.rule_forced_host = policies.SERVERS % 'create:forced_host'
self.req = fakes.HTTPRequest.blank('')
user_id = self.req.environ['nova.context'].user_id
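        # Stub out the view-builder helpers so responses can be built
        # without real security-group, metadata, or address lookups; only
        # the policy checks are under test here.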
self.controller._view_builder._add_security_grps = mock.MagicMock()
self.controller._view_builder._get_metadata = mock.MagicMock()
self.controller._view_builder._get_addresses = mock.MagicMock()
self.controller._view_builder._get_host_id = mock.MagicMock()
self.controller._view_builder._get_fault = mock.MagicMock()
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuids.fake_id, project_id=self.project_id,
user_id=user_id, vm_state=vm_states.ACTIVE,
system_metadata={}, expected_attrs=['system_metadata'])
self.mock_flavor = self.useFixture(
fixtures.MockPatch('nova.compute.flavors.get_flavor_by_flavor_id'
)).mock
self.mock_flavor.return_value = fake_flavor.fake_flavor_obj(
self.req.environ['nova.context'], flavorid='1')
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
self.mock_get.return_value = self.instance
self.mock_get_instance = self.useFixture(fixtures.MockPatchObject(
self.controller, '_get_instance')).mock
self.mock_get_instance.return_value = self.instance
self.mock_get_network_info = self.useFixture(
fixtures.MockPatch('nova.objects.Instance.get_network_info')).mock
self.mock_get_network_info.return_value = model.NetworkInfo()
self.servers = [fakes.stub_instance_obj(
1, vm_state=vm_states.ACTIVE, uuid=uuids.fake,
project_id=self.project_id, user_id='user1'),
fakes.stub_instance_obj(
2, vm_state=vm_states.ACTIVE, uuid=uuids.fake,
project_id='proj2', user_id='user2')]
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])
self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
self.controller.compute_api, 'get_all')).mock
self.body = {
'server': {
'name': 'server_test',
'imageRef': uuids.fake_id,
'flavorRef': uuids.fake_id,
},
}
self.extended_attr = ['OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name',
'OS-EXT-SRV-ATTR:user_data'
]
# Users that can take action on *our* project resources
self.project_action_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
])
# Users that can read *our* project resources
self.project_reader_authorized_contexts = (
self.project_action_authorized_contexts)
# Users that _see_ project-scoped resources that they own
self.everyone_authorized_contexts = set(self.all_contexts)
# Users that can _do_ things to project-scoped resources they own
self.project_member_authorized_contexts = set(self.all_contexts)
# Users able to do admin things on project resources
self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context])
        # Admins for APIs that do not pass a project id as the policy
        # target (for example create server, or list servers in detail):
        # they can list all projects' servers, create a server on a
        # specific host, etc. This is admin on any project because the
        # policy does not check the project id; still, they can only
        # create servers, and list servers (unless the all-tenants
        # policy allows it), within their own project.
self.all_projects_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context])
# Users able to do cross-cell migrations
self.cross_cell_authorized_contexts = []
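        # Left empty: by default no context is expected to pass the
        # cross-cell resize policy in these tests.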
def test_index_server_policy(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
if 'project_id' in search_opts or 'user_id' in search_opts:
return objects.InstanceList(objects=self.servers)
            else:
                # A bare ``raise`` here would fail with "No active
                # exception to re-raise"; fail explicitly instead.
                raise AssertionError(
                    'expected search_opts scoped by project_id or user_id')
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'index'
self.common_policy_auth(
self.everyone_authorized_contexts,
rule_name,
self.controller.index,
self.req)
def test_index_all_project_server_policy(self):
# 'index' policy is checked before 'index:get_all_tenants' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.SERVERS % 'index:get_all_tenants'
req = fakes.HTTPRequest.blank('/servers?all_tenants')
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertNotIn('project_id', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
if not CONF.oslo_policy.enforce_scope:
check_rule = rule_name
else:
check_rule = functools.partial(
base.rule_if_system, rule, rule_name)
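        # With scope enforcement on, the applicable rule varies per
        # context: as used here, base.rule_if_system picks the base rule
        # for system-scoped contexts and the get_all_tenants rule
        # otherwise, so common_policy_auth gets a callable instead of a
        # fixed rule name.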
self.common_policy_auth(self.all_projects_admin_authorized_contexts,
check_rule,
self.controller.index,
req)
@mock.patch('nova.compute.api.API.get_all')
def test_detail_list_server_policy(self, mock_get):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
if 'project_id' in search_opts or 'user_id' in search_opts:
return objects.InstanceList(objects=self.servers)
            else:
                raise AssertionError(
                    'expected search_opts scoped by project_id or user_id')
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'detail'
self.common_policy_auth(
self.everyone_authorized_contexts,
rule_name,
self.controller.detail,
self.req)
def test_detail_list_all_project_server_policy(self):
# 'detail' policy is checked before 'detail:get_all_tenants' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'detail'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.SERVERS % 'detail:get_all_tenants'
req = fakes.HTTPRequest.blank('/servers?all_tenants')
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertNotIn('project_id', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
if not CONF.oslo_policy.enforce_scope:
check_rule = rule_name
else:
check_rule = functools.partial(
base.rule_if_system, rule, rule_name)
self.common_policy_auth(self.all_projects_admin_authorized_contexts,
check_rule,
self.controller.detail,
req)
def test_index_server_allow_all_filters_policy(self):
# 'index' policy is checked before 'allow_all_filters' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
            if context in self.all_projects_admin_authorized_contexts:
                self.assertIn('host', search_opts)
            else:
                self.assertNotIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
self.common_policy_auth(
self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.index,
req, fatal=False)
def test_detail_server_allow_all_filters_policy(self):
# 'detail' policy is checked before 'allow_all_filters' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'detail'
self.policy.set_rules({rule: "@"}, overwrite=False)
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
            if context in self.all_projects_admin_authorized_contexts:
                self.assertIn('host', search_opts)
            else:
                self.assertNotIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
self.common_policy_auth(
self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.detail,
req, fatal=False)
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
def test_show_server_policy(self, mock_bdm):
rule_name = policies.SERVERS % 'show'
# Show includes readers
self.common_policy_auth(
self.project_reader_authorized_contexts,
rule_name,
self.controller.show,
self.req, self.instance.uuid)
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
rule = policies.SERVERS % 'show'
# server 'show' policy is checked before flavor extra specs
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.SERVERS % 'show:flavor-extra-specs'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_reader_authorized_contexts,
rule_name, self.controller.show, req,
self.instance.uuid, fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['server']['flavor'])
@mock.patch('nova.compute.api.API.get_all')
def test_server_detail_with_extra_specs_policy(self, mock_get):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
if 'project_id' in search_opts or 'user_id' in search_opts:
return objects.InstanceList(objects=self.servers)
            else:
                raise AssertionError(
                    'expected search_opts scoped by project_id or user_id')
self.mock_get_all.side_effect = fake_get_all
rule = policies.SERVERS % 'detail'
# server 'detail' policy is checked before flavor extra specs
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.SERVERS % 'show:flavor-extra-specs'
authorize_res, unauthorize_res = self.common_policy_auth(
self.everyone_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['servers'][0]['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch('nova.compute.api.API.rebuild')
def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
mock_get, mock_bdm):
rule = policies.SERVERS % 'rebuild'
# server 'rebuild' policy is checked before flavor extra specs
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.SERVERS % 'show:flavor-extra-specs'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_reader_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp.obj['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
def test_server_update_with_extra_specs_policy(self,
mock_update, mock_group, mock_bdm):
mock_update.return_value = self.instance
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before flavor extra specs
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.SERVERS % 'show:flavor-extra-specs'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_reader_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['server']['flavor'])
@mock.patch('nova.compute.api.API.create')
def test_create_server_policy(self, mock_create):
mock_create.return_value = ([self.instance], '')
rule_name = policies.SERVERS % 'create'
self.common_policy_auth(self.project_member_authorized_contexts,
rule_name,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
@mock.patch('nova.compute.api.API.parse_availability_zone')
@mock.patch.object(
servers.ServersController, '_validate_host_availability_zone',
new=mock.Mock(return_value=None))
def test_create_forced_host_server_policy(self, mock_az, mock_create):
# 'create' policy is checked before 'create:forced_host' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
mock_create.return_value = ([self.instance], '')
mock_az.return_value = ('test', 'host', None)
self.common_policy_auth(self.all_projects_admin_authorized_contexts,
self.rule_forced_host,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_volume_server_policy(self, mock_create):
# 'create' policy is checked before 'create:attach_volume' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
mock_create.return_value = ([self.instance], '')
body = {
'server': {
'name': 'server_test',
'imageRef': uuids.fake_id,
'flavorRef': uuids.fake_id,
'block_device_mapping': [{'device_name': 'foo'}],
},
}
self.common_policy_auth(self.project_member_authorized_contexts,
self.rule_attach_volume,
self.controller.create,
self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_network_server_policy(self, mock_create):
# 'create' policy is checked before 'create:attach_network' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
mock_create.return_value = ([self.instance], '')
body = {
'server': {
'name': 'server_test',
'imageRef': uuids.fake_id,
'flavorRef': uuids.fake_id,
'networks': [{
'uuid': uuids.fake_id
}],
},
}
self.common_policy_auth(self.project_member_authorized_contexts,
self.rule_attach_network,
self.controller.create,
self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_trusted_certs_server_policy(self, mock_create):
# 'create' policy is checked before 'create:trusted_certs' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.63')
mock_create.return_value = ([self.instance], '')
body = {
'server': {
'name': 'server_test',
'imageRef': uuids.fake_id,
'flavorRef': uuids.fake_id,
'trusted_image_certificates': [uuids.fake_id],
'networks': [{
'uuid': uuids.fake_id
}],
},
}
self.common_policy_auth(self.project_member_authorized_contexts,
self.rule_trusted_certs,
self.controller.create,
req, body=body)
@mock.patch('nova.compute.api.API.delete')
def test_delete_server_policy(self, mock_delete):
rule_name = policies.SERVERS % 'delete'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller.delete,
self.req, self.instance.uuid)
def test_delete_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'delete'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
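        # The override compares the credential user_id with the target
        # server's user_id, so the 'other-user' context set up above must
        # be rejected.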
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller.delete,
req, self.instance.uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.delete')
def test_delete_server_overridden_policy_pass_with_same_user(
self, mock_delete):
rule_name = policies.SERVERS % 'delete'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
self.controller.delete(self.req,
self.instance.uuid)
@mock.patch('nova.compute.api.API.update_instance')
def test_update_server_policy(self, mock_update):
rule_name = policies.SERVERS % 'update'
body = {'server': {'name': 'test'}}
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller.update,
self.req, self.instance.uuid, body=body)
def test_update_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'update'
body = {'server': {'name': 'test'}}
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller.update,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.update_instance')
def test_update_server_overridden_policy_pass_with_same_user(
self, mock_update):
rule_name = policies.SERVERS % 'update'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'server': {'name': 'test'}}
self.controller.update(self.req,
self.instance.uuid, body=body)
@mock.patch('nova.compute.api.API.confirm_resize')
def test_confirm_resize_server_policy(self, mock_confirm_resize):
rule_name = policies.SERVERS % 'confirm_resize'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_confirm_resize,
self.req, self.instance.uuid,
body={'confirmResize': 'null'})
@mock.patch('nova.compute.api.API.revert_resize')
def test_revert_resize_server_policy(self, mock_revert_resize):
rule_name = policies.SERVERS % 'revert_resize'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_revert_resize,
self.req, self.instance.uuid,
body={'revertResize': 'null'})
@mock.patch('nova.compute.api.API.reboot')
def test_reboot_server_policy(self, mock_reboot):
rule_name = policies.SERVERS % 'reboot'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_reboot,
self.req, self.instance.uuid,
body={'reboot': {'type': 'soft'}})
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_policy(self, mock_resize):
rule_name = policies.SERVERS % 'resize'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_resize,
self.req, self.instance.uuid,
body={'resize': {'flavorRef': 'f1'}})
def test_resize_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'resize'
body = {'resize': {'flavorRef': 'f1'}}
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._action_resize,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_overridden_policy_pass_with_same_user(
self, mock_resize):
rule_name = policies.SERVERS % 'resize'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'resize': {'flavorRef': 'f1'}}
self.controller._action_resize(self.req,
self.instance.uuid, body=body)
@mock.patch('nova.compute.api.API.start')
def test_start_server_policy(self, mock_start):
rule_name = policies.SERVERS % 'start'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._start_server,
self.req, self.instance.uuid,
body={'os-start': 'null'})
@mock.patch('nova.compute.api.API.stop')
def test_stop_server_policy(self, mock_stop):
rule_name = policies.SERVERS % 'stop'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._stop_server,
self.req, self.instance.uuid,
body={'os-stop': 'null'})
def test_stop_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'stop'
body = {'os-stop': 'null'}
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._stop_server,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.stop')
def test_stop_server_overridden_policy_pass_with_same_user(
self, mock_stop):
rule_name = policies.SERVERS % 'stop'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'os-stop': 'null'}
self.controller._stop_server(self.req,
self.instance.uuid, body=body)
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_server_policy(self, mock_rebuild):
rule_name = policies.SERVERS % 'rebuild'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_rebuild,
self.req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}})
def test_rebuild_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'rebuild'
body = {'rebuild': {"imageRef": uuids.fake_id}}
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._action_rebuild,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_server_overridden_policy_pass_with_same_user(
self, mock_rebuild):
rule_name = policies.SERVERS % 'rebuild'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'rebuild': {"imageRef": uuids.fake_id}}
self.controller._action_rebuild(self.req,
self.instance.uuid, body=body)
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_trusted_certs_server_policy(self, mock_rebuild):
# 'rebuild' policy is checked before 'rebuild:trusted_certs' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'rebuild'
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.63')
rule_name = policies.SERVERS % 'rebuild:trusted_certs'
body = {
'rebuild': {
'imageRef': uuids.fake_id,
'trusted_image_certificates': [uuids.fake_id],
},
}
if not CONF.oslo_policy.enforce_scope:
check_rule = rule_name
else:
check_rule = functools.partial(
base.rule_if_system, rule, rule_name)
self.common_policy_auth(self.project_action_authorized_contexts,
check_rule,
self.controller._action_rebuild,
req, self.instance.uuid, body=body)
def test_rebuild_trusted_certs_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('', version='2.63')
req.environ['nova.context'].user_id = 'other-user'
rule = policies.SERVERS % 'rebuild'
rule_name = policies.SERVERS % 'rebuild:trusted_certs'
body = {
'rebuild': {
'imageRef': uuids.fake_id,
'trusted_image_certificates': [uuids.fake_id],
},
}
self.policy.set_rules(
{rule: "@",
rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._action_rebuild,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_trusted_certs_overridden_policy_pass_with_same_user(
self, mock_rebuild):
req = fakes.HTTPRequest.blank('', version='2.63')
rule = policies.SERVERS % 'rebuild'
rule_name = policies.SERVERS % 'rebuild:trusted_certs'
body = {
'rebuild': {
'imageRef': uuids.fake_id,
'trusted_image_certificates': [uuids.fake_id],
},
}
self.policy.set_rules(
{rule: "@",
rule_name: "user_id:%(user_id)s"}, overwrite=False)
self.controller._action_rebuild(req,
self.instance.uuid, body=body)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.image.glance.API.generate_image_url')
@mock.patch('nova.compute.api.API.snapshot_volume_backed')
def test_create_image_server_policy(self, mock_snapshot, mock_image,
mock_bdm):
rule_name = policies.SERVERS % 'create_image'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_create_image,
self.req, self.instance.uuid,
body={'createImage': {"name": 'test'}})
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.image.glance.API.generate_image_url')
@mock.patch('nova.compute.api.API.snapshot_volume_backed')
def test_create_image_allow_volume_backed_server_policy(self,
mock_snapshot, mock_image, mock_bdm):
# 'create_image' policy is checked before
# 'create_image:allow_volume_backed' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create_image'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.SERVERS % 'create_image:allow_volume_backed'
if not CONF.oslo_policy.enforce_scope:
check_rule = rule_name
else:
check_rule = functools.partial(
base.rule_if_system, rule, rule_name)
self.common_policy_auth(self.project_action_authorized_contexts,
check_rule,
self.controller._action_create_image,
self.req, self.instance.uuid,
body={'createImage': {"name": 'test'}})
@mock.patch('nova.compute.api.API.trigger_crash_dump')
def test_trigger_crash_dump_server_policy(self, mock_crash):
rule_name = policies.SERVERS % 'trigger_crash_dump'
req = fakes.HTTPRequest.blank('', version='2.17')
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._action_trigger_crash_dump,
req, self.instance.uuid,
body={'trigger_crash_dump': None})
def test_trigger_crash_dump_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('', version='2.17')
req.environ['nova.context'].user_id = 'other-user'
rule_name = policies.SERVERS % 'trigger_crash_dump'
body = {'trigger_crash_dump': None}
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._action_trigger_crash_dump,
req, self.instance.uuid, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.trigger_crash_dump')
def test_trigger_crash_dump_overridden_policy_pass_with_same_user(
self, mock_crash):
req = fakes.HTTPRequest.blank('', version='2.17')
rule_name = policies.SERVERS % 'trigger_crash_dump'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'trigger_crash_dump': None}
self.controller._action_trigger_crash_dump(req,
self.instance.uuid, body=body)
def test_server_detail_with_extended_attr_policy(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
rule = policies.SERVERS % 'detail'
# server 'detail' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
authorize_res, unauthorize_res = self.common_policy_auth(
self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for attr in self.extended_attr:
for resp in authorize_res:
self.assertIn(attr, resp['servers'][0])
for resp in unauthorize_res:
self.assertNotIn(attr, resp['servers'][0])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_show_with_extended_attr_policy(self, mock_get, mock_block):
rule = policies.SERVERS % 'show'
# server 'show' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for attr in self.extended_attr:
for resp in authorize_res:
self.assertIn(attr, resp['server'])
for resp in unauthorize_res:
self.assertNotIn(attr, resp['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch('nova.compute.api.API.rebuild')
def test_server_rebuild_with_extended_attr_policy(self, mock_rebuild,
mock_get, mock_bdm):
rule = policies.SERVERS % 'rebuild'
# server 'rebuild' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
fatal=False)
for attr in self.extended_attr:
# NOTE(gmann): user_data attribute is always present in
# rebuild response since 2.47.
if attr == 'OS-EXT-SRV-ATTR:user_data':
continue
for resp in authorize_res:
self.assertIn(attr, resp.obj['server'])
for resp in unauthorize_res:
self.assertNotIn(attr, resp.obj['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_extended_attr_policy(self,
mock_status, mock_update, mock_group, mock_bdm):
mock_update.return_value = self.instance
mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
fatal=False)
for attr in self.extended_attr:
for resp in authorize_res:
self.assertIn(attr, resp['server'])
for resp in unauthorize_res:
self.assertNotIn(attr, resp['server'])
def test_server_detail_with_host_status_policy(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
rule = policies.SERVERS % 'detail'
# server 'detail' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
authorize_res, unauthorize_res = self.common_policy_auth(
self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['servers'][0])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['servers'][0])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_show_with_host_status_policy(self,
mock_status, mock_block):
rule = policies.SERVERS % 'show'
# server 'show' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['server'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch('nova.compute.api.API.rebuild')
def test_server_rebuild_with_host_status_policy(self, mock_rebuild,
mock_status, mock_bdm):
rule = policies.SERVERS % 'rebuild'
# server 'rebuild' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp.obj['server'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp.obj['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_host_status_policy(self,
mock_status, mock_update, mock_group, mock_bdm):
mock_update.return_value = self.instance
mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['server'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['server'])
@mock.patch('nova.compute.api.API.get_instances_host_statuses')
def test_server_detail_with_unknown_host_status_policy(self, mock_status):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
host_statuses = {}
for server in self.servers:
host_statuses.update({server.uuid: fields.HostStatus.UNKNOWN})
mock_status.return_value = host_statuses
rule = policies.SERVERS % 'detail'
# server 'detail' policy is checked before unknown host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts. To verify the unknown host_status
# policy we need to disallow host_status policy for everyone.
rule_host_status = policies.SERVERS % 'show:host_status'
self.policy.set_rules({
rule: "@",
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
authorize_res, unauthorize_res = self.common_policy_auth(
self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['servers'][0])
self.assertEqual(fields.HostStatus.UNKNOWN,
resp['servers'][0]['host_status'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['servers'][0])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_show_with_unknown_host_status_policy(self,
mock_status, mock_block):
mock_status.return_value = fields.HostStatus.UNKNOWN
rule = policies.SERVERS % 'show'
# server 'show' policy is checked before unknown host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts. To verify the unknown host_status
# policy we need to disallow host_status policy for everyone.
rule_host_status = policies.SERVERS % 'show:host_status'
self.policy.set_rules({
rule: "@",
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['server'])
self.assertEqual(
fields.HostStatus.UNKNOWN, resp['server']['host_status'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch('nova.compute.api.API.rebuild')
def test_server_rebuild_with_unknown_host_status_policy(self, mock_rebuild,
mock_status, mock_bdm):
mock_status.return_value = fields.HostStatus.UNKNOWN
rule = policies.SERVERS % 'rebuild'
# server 'rebuild' policy is checked before unknown host_status
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts. To verify the unknown host_status
# policy we need to disallow host_status policy for everyone.
rule_host_status = policies.SERVERS % 'show:host_status'
self.policy.set_rules({
rule: "@",
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp.obj['server'])
self.assertEqual(
fields.HostStatus.UNKNOWN, resp.obj['server']['host_status'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp.obj['server'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
def test_server_update_with_unknown_host_status_policy(self,
mock_update, mock_group, mock_status, mock_bdm):
mock_update.return_value = self.instance
mock_status.return_value = fields.HostStatus.UNKNOWN
rule = policies.SERVERS % 'update'
        # The server 'update' policy is checked before the unknown
        # host_status policy, so we have to allow it for everyone; otherwise
        # the check would fail first for unauthorized contexts. To verify the
        # unknown host_status policy we disallow the host_status policy for
        # everyone.
rule_host_status = policies.SERVERS % 'show:host_status'
self.policy.set_rules({
rule: "@",
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
authorize_res, unauthorize_res = self.common_policy_auth(
self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
fatal=False)
for resp in authorize_res:
self.assertIn('host_status', resp['server'])
self.assertEqual(
fields.HostStatus.UNKNOWN, resp['server']['host_status'])
for resp in unauthorize_res:
self.assertNotIn('host_status', resp['server'])
@mock.patch('nova.compute.api.API.create')
def test_create_requested_destination_server_policy(self,
mock_create):
# 'create' policy is checked before 'create:requested_destination' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = policies.SERVERS % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.74')
def fake_create(context, *args, **kwargs):
            for attr in ['requested_host', 'requested_hypervisor_hostname']:
                if context in self.all_projects_admin_authorized_contexts:
                    self.assertIn(attr, kwargs)
                else:
                    self.assertNotIn(attr, kwargs)
return ([self.instance], '')
mock_create.side_effect = fake_create
body = {
'server': {
'name': 'server_test',
'imageRef': uuids.fake_id,
'flavorRef': uuids.fake_id,
'networks': [{
'uuid': uuids.fake_id
}],
'host': 'fake',
'hypervisor_hostname': 'fake'
},
}
self.common_policy_auth(self.all_projects_admin_authorized_contexts,
self.rule_requested_destination,
self.controller.create,
req, body=body)
@mock.patch(
'nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch(
'nova.objects.Instance.image_meta',
new=objects.ImageMeta.from_dict({}))
@mock.patch('nova.compute.api.API._check_requested_networks')
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
# we have to allow it for everyone otherwise it will
# fail for unauthorized contexts here.
rule = 'os_compute_api:os-migrate-server:migrate'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.CROSS_CELL_RESIZE
req = fakes.HTTPRequest.blank('', version='2.56')
def fake_get(*args, **kwargs):
inst = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuids.fake_id, project_id=self.project_id,
user_id='fake-user', vm_state=vm_states.ACTIVE,
expected_attrs=['system_metadata', 'info_cache'],
launched_at=timeutils.utcnow(), host='host')
inst.services = objects.ServiceList(self.project_member_context)
inst.services.objects.append(
objects.Service(
context=self.project_member_context,
host=inst.host,
binary='nova-compute',
topic='compute',
report_count=0
)
)
return inst
mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
if context in self.cross_cell_authorized_contexts:
self.assertTrue(allow_cross_cell_resize)
if context not in self.cross_cell_authorized_contexts:
self.assertFalse(allow_cross_cell_resize)
return objects.ComputeNode(host=1, hypervisor_hostname=2)
self.stub_out(
'nova.compute.api.API._validate_host_for_cold_migrate',
fake_validate)
self.common_policy_auth(self.cross_cell_authorized_contexts,
rule_name,
self.m_controller._migrate,
req, self.instance.uuid,
body={'migrate': {'host': 'fake'}},
fatal=False)
def test_network_attach_external_network_policy(self):
        # NOTE(gmann): Testing policy 'network:attach_external_network',
        # which raises a different error than PolicyNotAuthorized
        # when not allowed.
neutron_api = neutron.API()
for context in self.all_projects_admin_authorized_contexts:
neutron_api._check_external_network_attach(context,
[{'id': 1, 'router:external': 'ext'}])
unauth = (set(self.all_contexts) -
set(self.all_projects_admin_authorized_contexts))
for context in unauth:
self.assertRaises(exception.ExternalNetworkAttachForbidden,
neutron_api._check_external_network_attach,
context, [{'id': 1, 'router:external': 'ext'}])
def test_zero_disk_flavor_policy(self):
        # NOTE(gmann): Testing policy 'create:zero_disk_flavor',
        # which raises a different error than PolicyNotAuthorized
        # when not allowed.
image = {'id': uuids.image_id, 'status': 'foo'}
flavor = objects.Flavor(
vcpus=1, memory_mb=512, root_gb=0, extra_specs={'hw:pmu': "true"})
compute_api = compute.API()
for context in self.all_projects_admin_authorized_contexts:
compute_api._validate_flavor_image_nostatus(context,
image, flavor, None)
unauth = (set(self.all_contexts) -
set(self.all_projects_admin_authorized_contexts))
for context in unauth:
self.assertRaises(
exception.BootFromVolumeRequiredForZeroDiskFlavor,
compute_api._validate_flavor_image_nostatus,
context, image, flavor, None)
class ServersNoLegacyNoScopeTest(ServersPolicyTest):
"""Test Servers API policies with deprecated rules disabled, but scope
checking still disabled.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
base_policy.PROJECT_READER,
}
def setUp(self):
super(ServersNoLegacyNoScopeTest, self).setUp()
# Disabling legacy rule support means that we no longer allow
# random roles on our project to take action on our
# resources. We also do not allow admin on other projects
# (i.e. legacy_admin), nor system (because it's admin on no
# project).
self.reduce_set('project_action_authorized', set([
self.project_admin_context, self.project_member_context,
]))
self.reduce_set('project_admin_authorized', set([
self.project_admin_context
]))
# The only additional role that can read our resources is our
# own project_reader.
self.project_reader_authorized_contexts = (
self.project_action_authorized_contexts |
set([self.project_reader_context])
)
# Disabling legacy support means random roles lose power to
# see everything in their project.
self.reduce_set('everyone_authorized',
self.all_contexts - set([self.project_foo_context,
self.system_foo_context]))
# Disabling legacy support means readers and random roles lose
# power to create things on their own projects. Note that
# system_admin and system_member are still here because we are
# not rejecting them by scope, even though these operations
# with those tokens are likely to fail because they have no
# project.
self.reduce_set('project_member_authorized',
self.all_contexts - set([
self.system_reader_context,
self.system_foo_context,
self.project_reader_context,
self.project_foo_context,
self.other_project_reader_context]))
class ServersScopeTypePolicyTest(ServersPolicyTest):
"""Test Servers APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ServersScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # These policies are project scoped only, and the 'create' policy is
        # checked first, so even if we allow it for everyone the system
        # scoped contexts cannot validate these: they fail on the 'create'
        # policy due to scope_type. So we set the rule names to None to skip
        # the policy error message assertion in the base class. These rule
        # names are only used for that assertion.
self.rule_trusted_certs = None
self.rule_attach_network = None
self.rule_attach_volume = None
self.rule_requested_destination = None
self.rule_forced_host = None
# With scope checking enabled, system admins no longer have
# admin-granted project resource access.
self.reduce_set('project_action_authorized',
set([self.legacy_admin_context,
self.project_admin_context,
self.project_member_context,
self.project_reader_context,
self.project_foo_context]))
# No change from the base behavior here, but we need to
# re-build this from project_action_authorized, since we
# changed it above.
self.project_reader_authorized_contexts = (
self.project_action_authorized_contexts)
# With scope checking enabled, system users no longer have
# project access, even to create their own resources.
self.reduce_set('project_member_authorized', self.all_project_contexts)
# With scope checking enabled, system admin is no longer an
# admin of project resources.
self.reduce_set('project_admin_authorized',
set([self.legacy_admin_context,
self.project_admin_context]))
self.reduce_set('all_projects_admin_authorized',
set([self.legacy_admin_context,
self.project_admin_context]))
# With scope checking enabled, system users also lose access to read
# project resources.
self.reduce_set('everyone_authorized',
self.all_contexts - self.all_system_contexts)
class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
"""Test Servers APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
base_policy.PROJECT_READER,
}
def setUp(self):
super(ServersNoLegacyPolicyTest, self).setUp()
# Disabling legacy support means legacy_admin is no longer
# powerful on our project. Also, we drop the "any role on the
# project means you can do stuff" behavior, so project_reader
# and project_foo lose power.
self.reduce_set('project_action_authorized', set([
self.project_admin_context,
self.project_member_context,
]))
        # With no legacy rules and scope checks enabled, only the project
        # admin can do admin things on project resources.
self.reduce_set('project_admin_authorized',
set([self.project_admin_context]))
# Only project_reader has additional read access to our
# project resources.
self.project_reader_authorized_contexts = (
self.project_action_authorized_contexts |
set([self.project_reader_context]))
# Disabling legacy support means random roles lose power to
# see everything in their project.
self.reduce_set(
'everyone_authorized',
self.all_project_contexts - set([self.project_foo_context]))
# Disabling legacy support means readers and random roles lose
# power to create things on their own projects.
self.reduce_set('project_member_authorized',
self.all_project_contexts - set([
self.project_foo_context,
self.project_reader_context,
self.other_project_reader_context,
]))
|
|
import logging
from enum import Enum
from playingfield import *
from lobby import Lobby
class ClientStatus(Enum):
"""
Encapsulates the different client statuses.
Author:
Maximilian Hess <mail@maximilianhess.com>
"""
NOTCONNECTED = "notconnected"
NOGAMERUNNING = "nogamerunning"
PREPARATIONS = "preparations"
WAITINGFOROPPONENT = "waitingforopponent"
PREPARATIONSENDED = "preparationsended"
OWNTURN = "ownturn"
OPPONENTSTURN = "oppenentsturn"
YOUWIN = "youwin"
YOULOSE = "youlose"
class Callback:
"""
Callback (observer pattern).
Author:
Maximilian Hess <mail@maximilianhess.com>
"""
def onAction(self):
"""
Calls the callback.
"""
pass
class GameInformation:
"""
Represents a game.
Author:
Maximilian Hess <mail@maximilianhess.com>
Args:
name: the name of the game
firstPlayer: the identifier of the first player
"""
def toString(self):
"""
        Returns a string representation of the game.
"""
result = ("%s: %s vs.") % (self.name, self.players[0])
if len(self.players) > 1:
result = ("%s %s") % (result, self.players[1])
return result
def __init__(self, name, firstPlayer):
self.name = name
self.players = [firstPlayer]
class PlayerInformation:
"""
Represents a player.
Author:
Maximilian Hess <mail@maximilianhess.com>
Args:
id: the identifier of the player
nickname: the nickname of the player
"""
def toString(self):
"""
        Returns a string representation of the player.
"""
return "%s(%s)" % (self.id, self.nickname)
def __init__(self, id, nickname):
self.id = id
self.nickname = nickname
class Backend:
"""
    Game client backend that handles all of the controller logic.
Author:
Maximilian Hess <mail@maximilianhess.com>
Args:
length: the length of the playing field
        hostname: the hostname of the server to connect to. If not set, no connection is established yet
        port: the port of the server to connect to. If not set, no connection is established yet
"""
def getOwnShips(self):
"""
        Returns the player's ships.
        Returns:
            Returns the player's ships.
"""
return self.__ownPlayingField.getShips()
def getOwnShip(self, shipId):
"""
Returns a specified ship from the own playing field.
Args:
shipId: the id of the ship
Returns:
Returns a specified ship from the own playing field.
"""
return self.__ownPlayingField.getShip(shipId)
def getEnemyPlayingField(self):
"""
        Returns the enemy's playing field.
        Returns:
            Returns the enemy's playing field.
"""
return self.__enemeysPlayingField.getField()
def placeShip(self, bow, rear):
"""
Places a new Ship on the own playing field.
Args:
bow: address of the bow
rear: address of the rear
Returns:
            Returns True if the user still has to place more ships, or False
            once all ships have been placed.
"""
shipId, moreShips = self.__ownPlayingField.placeShip(bow, rear)
if 0 <= shipId < 10:
self.shipUpdate(shipId)
if not moreShips and not self.__boardAlreadySent:
self.__boardAlreadySent = True
self.__serverHandler.boardInit(self.__ownPlayingField.getShips())
self.__updateClientStatus(ClientStatus.WAITINGFOROPPONENT)
return moreShips
def onPlaceShips(self, success):
"""
Is called when the server returns a ship placement report.
Args:
success: True if the placement has been successful or False if not
"""
if not success:
self.__onError("Failed to place ships")
def registerClientStatusCallback(self, callback):
"""
Registers a new callback that will be called when the status of the client updates.
Args:
callback: the callback
"""
self.__clientStatusCallbacks.append(callback)
logging.debug("Client status callback added")
def __updateClientStatus(self, status):
self.clientStatus = status
for cb in self.__clientStatusCallbacks:
cb.onAction()
def registerLobbyUpdateGamesCallback(self, callback):
"""
Registers a new callback that is called when the server sends a lobby update.
Args:
callback: the callback
Returns:
A tuple consisting of the players and the games.
"""
self.__lobbyUpdateGamesCallbacks.append(callback)
logging.debug("Lobby callback added")
return self.__lobbyCurrentPlayers, self.__lobbyCurrentGames
def removeLobbyUpdateGamesCallback(self, callback):
"""
Removes a lobby update callback.
Args:
callback: the callback to remove
"""
        if callback in self.__lobbyUpdateGamesCallbacks:
            self.__lobbyUpdateGamesCallbacks.remove(callback)
            logging.debug("Lobby callback removed")
def onLobbyUpdates(self, players, games):
"""
Calls all lobby update callbacks when there is any update.
Args:
players: complete list of the current players
games: complete list of the current games
"""
self.lobby.onUpdate(games, players)
# check if there was an update with the own game. E.g. opponent joined or changed nickname
if self.lobby.hasGame():
for game in games:
if game.name == self.lobby.game.name:
if self.lobby.hasOpponent():
# TODO: Check if opponent changed nickname
pass
else:
# check if anybody joined
if len(game.players) > 1:
self.lobby.setOpponent(game.players[1])
self.__onOpponentJoinedGame()
break
for callback in self.__lobbyUpdateGamesCallbacks:
callback.onAction()
def joinGame(self, gameId, callback):
"""
Joins a new game and registers a callback that will be called when the server answered.
Args:
gameId: the id of the game to join
callback: the callback
"""
self.__triedToJoinGame = True
self.__joinGameCallbacks.append(callback)
self.lobby.tryToGame(gameId)
self.__serverHandler.joinGame(gameId)
def onJoinGame(self, success):
"""
Calls all registered callbacks when the server answers a game join query.
Args:
            success: True if the query was successful, False otherwise
"""
if success:
self.lobby.joinSuccessful()
logging.info("Successfully join game '%s' against '%s'" % (self.lobby.game.name,
self.lobby.getNickname(self.lobby.opponent)))
# TODO: validate current client status
self.__updateClientStatus(ClientStatus.PREPARATIONS)
for cb in self.__joinGameCallbacks:
cb.onAction(success)
self.__joinGameCallbacks = []
def createGame(self, gameId, callback):
"""
        Creates a new game on the current server and registers a callback that will be called when the server answers.
        Args:
            gameId: the identifier (a name) of the game
callback: the callback
"""
self.__triedToCreateGame = True
self.__createGameCallbacks.append(callback)
self.lobby.tryToGame(gameId)
self.__serverHandler.createGame(gameId)
def onIllegalGameDefinition(self):
"""
Is called when there is a problem with the game.
"""
        if self.__triedToCreateGame:
            self.onCreateGame(False)
            self.__triedToCreateGame = False
elif self.__triedToJoinGame:
self.onJoinGame(False)
self.__triedToJoinGame = False
def onCreateGame(self, success):
"""
Calls all registered callbacks when the servers answers a create game query.
Args:
            success: True if the query was successful, False otherwise
"""
#TODO: validate current client status
if success:
self.lobby.createSuccessful()
self.__updateClientStatus(ClientStatus.WAITINGFOROPPONENT)
for cb in self.__createGameCallbacks:
cb.onAction(success)
self.__createGameCallbacks = []
def onCapitulate(self):
"""
Is called when the client receives a capitulate report.
"""
for cb in self.__capitulateCallbacks:
cb.onAction()
self.__capitulateCallbacks = []
self.__updateClientStatus(ClientStatus.YOULOSE)
    def registerLeaveGameCallback(self, callback):
        """
        Registers a callback that will be called when the player has left the game.
        Args:
            callback: the callback
        """
        self.__leaveGameCallbacks.append(callback)
def leaveGame(self):
"""
        Leaves the current game. The registered leave-game callbacks are
        called once the server answers.
"""
self.__serverHandler.leaveGame()
def onLeaveGame(self):
"""
Is called when the client received an answer to the leave game query.
"""
for cb in self.__leaveGameCallbacks:
cb.onAction()
self.__updateClientStatus(ClientStatus.NOGAMERUNNING)
def onGameAborted(self):
"""
Is called when the game has been aborted.
"""
for cb in self.__leaveGameCallbacks:
cb.onAction()
self.__leaveGameCallbacks = []
self.__updateClientStatus(ClientStatus.NOGAMERUNNING)
def onGameEnded(self, params):
"""
Is called when the game ended.
Args:
            params: the end-of-game report; params["winner"] holds the index of the winning player as a string
"""
        # map the reported winner index to win/lose, depending on whether this client created or joined the game
if (params["winner"] == "0" and self.lobby.playerCreatedGame) \
or (params["winner"] == "1" and self.lobby.playerJoinedGame):
self.__updateClientStatus(ClientStatus.YOUWIN)
elif (params["winner"] == "0" and self.lobby.playerJoinedGame) \
or (params["winner"] == "1" and self.lobby.playerCreatedGame):
self.__updateClientStatus(ClientStatus.YOULOSE)
def close(self):
"""
Closes the client.
"""
self.__serverHandler.close()
self.__udpDiscoverer.close()
def connect(self, nickname, hostname, port):
"""
Connects to a server.
        Args:
            nickname: the nickname of the player
            hostname: the hostname or IP address of the server
            port: the port of the server
"""
# TODO: Validate input (if it is None)
result = self.__serverHandler.connect(hostname, port)
if result:
self.__updateClientStatus(ClientStatus.NOGAMERUNNING)
self.__serverHandler.setNickname(nickname)
self.lobby.nickname = nickname
return result
def setNickname(self, nickname):
"""
        Sets the nickname of the player.
Args:
nickname: the new nickname
"""
self.__serverHandler.setNickname(nickname)
def registerUdpDiscoveryCallback(self, callback):
"""
Registers a callback that informs about newly discovered servers.
Args:
            callback: the callback
        Returns:
            The list of servers discovered so far.
        """
self.__udpDiscoveryCallbacks.append(callback)
logging.debug("UDP discovery callback added")
return self.__udpServers
def removeUdpDiscoveryCallback(self, callback):
"""
Removes an already registered UDP discovery callback.
Args:
callback: the callback to remove
"""
        if callback in self.__udpDiscoveryCallbacks:
            self.__udpDiscoveryCallbacks.remove(callback)
            logging.debug("UDP discovery callback removed")
def udpDiscoveryUpdate(self, server):
"""
Is called when there is a server update.
Args:
            server: a server discovered via UDP broadcast
"""
if server not in self.__udpServers:
self.__udpServers.append(server)
for cb in self.__udpDiscoveryCallbacks:
cb.onAction(self.__udpServers)
def registerGamePlayCallback(self, callback):
"""
Registers a callback to stay informed about game play updates.
Args:
callback: the callback
"""
self.__gamePlayCallbacks.append(callback)
def onGamePlayUpdate(self, status):
"""
Is called when there is a game play update.
Args:
status: the received status update
"""
        if status == 11:
            self.__updateClientStatus(ClientStatus.OWNTURN)
        elif status in (21, 22, 24):
            if status == 21:
                self.__onMove(self.__lastMove[0], self.__lastMove[1])
                self.__onRepaint()
            elif status == 24:
                self.__onSpecialAttack()
            self.__updateClientStatus(ClientStatus.OPPONENTSTURN)
        elif status == 23:
            self.__updateClientStatus(ClientStatus.YOULOSE)
        elif status == 31:
            self.__onError("Move not allowed")
        elif status == 32:
            self.__onError("Special Attack not allowed")
        elif status == 39:
            self.__onError("Attack not allowed")
for cb in self.__gamePlayCallbacks:
cb.onAction(status)
def capitulate(self, callback):
"""
        The player capitulates.
Args:
callback: is called after the server answers
"""
self.__capitulateCallbacks.append(callback)
self.__serverHandler.capitulate()
def registerGamePreparationsEndedCallback(self, callback):
"""
Registers a callback that will be called when game preparations have finished.
Args:
callback: the callback
"""
self.__gamePreparationsEndedCallbacks.append(callback)
def gamePreparationsEndedResponse(self):
"""
Is called when the client received an answer to the game preparations query.
"""
self.__updateClientStatus(ClientStatus.PREPARATIONSENDED)
for cb in self.__gamePreparationsEndedCallbacks:
cb.onAction()
def registerShipUpdateCallback(self, callback):
"""
Registers a new callback to inform about ship updates.
Args:
callback: the callback
"""
self.__shipUpdateCallbacks.append(callback)
def shipUpdate(self, shipId):
"""
Is called when there is any ship update.
Args:
shipId: the id of the updated ship
"""
for cb in self.__shipUpdateCallbacks:
cb.onAction(shipId)
def attack(self, target):
"""
Attacks the enemy at the given field.
Args:
target: the address of the field
"""
# TODO: validate field
if self.clientStatus is not ClientStatus.OWNTURN:
self.__onError("It is not your turn.")
return
self.__serverHandler.attack(target)
def specialAttack(self, target):
"""
Special-attacks the given field.
Args:
target: the address of the bottom-left field
"""
# TODO: validate field
if self.clientStatus is not ClientStatus.OWNTURN:
self.__onError("It is not your turn.")
return
self.__serverHandler.specialAttack(target)
def __onMove(self, shipId, direction):
self.__ownPlayingField.move(shipId, direction)
def move(self, shipId, direction):
# TODO: validate direction
if self.clientStatus is not ClientStatus.OWNTURN:
self.__onError("It is not your turn.")
return
success = self.__ownPlayingField.movePossible(shipId, direction)
if success:
self.__serverHandler.move(shipId, direction)
self.__lastMove = (shipId, direction)
return success
def errorResponse(self, status):
"""
Error response.
Args:
status: the status
"""
pass
def sendChatMessage(self, msg):
"""
Sends a chat message.
Args:
msg: the message
"""
self.__serverHandler.sendChatMessage(msg)
def registerChatCallback(self, callback):
"""
Registers a chat callback.
Args:
callback: the callback
"""
self.__chatCallbacks.append(callback)
def onIncomingChatMessage(self, authorId, timestamp, message):
"""
        Called when a chat message comes in; calls the corresponding callbacks.
Args:
authorId: the id of the author who sent the message
timestamp: unix timestamp in millis indicating the time when the message was sent
message: the content of the message
"""
for cb in self.__chatCallbacks:
cb.onAction(authorId, timestamp, message)
def registerJoinGameCallback(self, callback):
"""
Registers a game join callback.
Args:
callback: the callback
"""
self.__joinGameCallbacks.append(callback)
def registerErrorCallback(self, callback):
"""
Registers an error callback.
Args:
callback: the callback
"""
self.__errorCallbacks.append(callback)
def __onError(self, error):
for cb in self.__errorCallbacks:
cb.onAction(error)
def registerOpponentJoinedGameCallback(self, callback):
"""
Registers an opponent joined callback.
Args:
            callback: the callback
"""
self.__opponentJoinedGameCallbacks.append(callback)
def __onOpponentJoinedGame(self):
for cb in self.__opponentJoinedGameCallbacks:
cb.onAction()
def getShipAtPosition(self, field):
"""
Returns the id of a ship at a given position if there is one.
Args:
field: the field to check
Returns: The id of the ship if there is one or False if there is no ship.
"""
return self.__ownPlayingField.getShipAtPosition(field)
def isUnfogged(self, field):
"""
Validates if a given field is unfogged.
Args:
field: the field to validate
Returns: True if the field is unfogged or False if not.
"""
return self.__ownPlayingField.isUnfogged(field)
def onUpdateOwnFields(self, params):
"""
Is called when there are updates for the own playing field.
Args:
params: the updates
"""
if self.__ownPlayingField.onAttack(params):
self.__playSound("")
self.__onRepaint()
def __playSound(self, type):
for cb in self.__playSoundCallback:
cb.onAction(type)
def onUpdateEnemyFields(self, params):
"""
Is called when there are updates for the enemy's playing field.
Args:
params: the updates
"""
self.__enemeysPlayingField.onAttack(params)
self.__onRepaint()
def getOwnUnfogged(self):
"""
Returns the unfogged fields on the own playing field.
Returns: the unfogged fields on the own playing field.
"""
return self.__ownPlayingField.getUnfogged()
def getEnemyUnfogged(self):
"""
Returns the unfogged fields on the enemy's playing field.
Returns: the unfogged fields on the enemy's playing field.
"""
return self.__enemeysPlayingField.getUnfogged()
def onBeginShipPlacing(self):
"""
Is called when the client receives a place ships report from the server.
"""
self.__updateClientStatus(ClientStatus.PREPARATIONS)
def registerRepaintCallback(self, callback):
"""
        Registers a repaint callback.
Args:
callback: the callback
"""
self.__repaintCallbacks.append(callback)
def registerPlaySoundCallback(self, callback):
"""
Registers a sound callback.
Args:
            callback: the callback
        """
self.__playSoundCallback.append(callback)
def registerSpecialAttackCallback(self, callback):
"""
        Registers a special attack callback.
Args:
callback: the callback
"""
self.__specialAttackCallbacks.append(callback)
def __onSpecialAttack(self):
for cb in self.__specialAttackCallbacks:
cb.onAction()
def __onRepaint(self):
for cb in self.__repaintCallbacks:
cb.onAction()
def resetClient(self):
"""
Resets the client and prepares for a new game.
"""
logging.info("Resetting backend...")
self.__setup()
self.lobby.reset()
self.__updateClientStatus(ClientStatus.NOGAMERUNNING)
def onLostConnection(self):
"""
        Is called when the client loses the connection to the server.
"""
logging.info("Resetting backend...")
self.__serverHandler.disconnect()
self.__setup()
self.lobby.reset()
self.__updateClientStatus(ClientStatus.NOTCONNECTED)
def disconnect(self):
self.__serverHandler.disconnect()
self.resetClient()
self.__updateClientStatus(ClientStatus.NOTCONNECTED)
def __setup(self):
self.__ownPlayingField = PlayingField(self.__length, self.devmode)
self.__enemeysPlayingField = EnemyPlayingField(self.__length)
self.clientStatus = ClientStatus.NOTCONNECTED
self.__boardAlreadySent = False
self.__lastMove = None
self.__triedToJoinGame = False
self.__triedToCreateGame = False
def __init__(self, length, hostname, port, nickname, devmode):
from serverhandler import ServerHandler
from udpdiscoverer import UDPDiscoverer
self.__length = length
self.lobby = Lobby(nickname)
self.devmode = devmode
self.__setup()
# callback stuff
self.__udpDiscoveryCallbacks = []
self.__udpServers = []
self.__clientStatusCallbacks = []
self.__lobbyCurrentPlayers = []
self.__lobbyCurrentGames = []
self.__lobbyUpdateGamesCallbacks = []
self.__joinGameCallbacks = []
self.__createGameCallbacks = []
self.__leaveGameCallbacks = []
self.__capitulateCallbacks = []
self.__connectCallbacks = []
self.__gamePreparationsEndedCallbacks = []
self.__gamePlayCallbacks = []
self.__shipUpdateCallbacks = []
self.__chatCallbacks = []
self.__errorCallbacks = []
self.__opponentJoinedGameCallbacks = []
self.__repaintCallbacks = []
self.__specialAttackCallbacks = []
self.__playSoundCallback = []
self.__serverHandler = ServerHandler(self)
if hostname and port and nickname:
if self.connect(nickname, hostname, port):
self.clientStatus = ClientStatus.NOGAMERUNNING
self.__udpDiscoverer = UDPDiscoverer(self)
|
|
#!/usr/bin/env python
from __future__ import print_function
import itertools
from collections import namedtuple
from textwrap import dedent
# https://xkcd.com/1319/
# https://xkcd.com/1205/
TestEnvBase = namedtuple('TestEnvBase', ['python_version', 'pytest_version',
'django_version', 'settings'])
class TestEnv(TestEnvBase):
def is_py2(self):
return self.python_version.startswith('python2') or self.python_version == 'pypy'
def is_py3(self):
return self.python_version.startswith('python3') or self.python_version == 'pypy3'
def is_pypy(self):
return self.python_version.startswith('pypy')
# Python to run tox.
RUN_PYTHON = '3.5'
PYTHON_MAIN_VERSIONS = ['python2.7', 'python3.4']
PYTHON_VERSIONS = ['python2.6', 'python2.7', 'python3.2', 'python3.3',
'python3.4', 'python3.5', 'pypy', 'pypy3']
PYTEST_VERSIONS = ['2.7.3', '2.8.1']
DJANGO_VERSIONS = ['1.4', '1.5', '1.6', '1.7', '1.8', '1.9', 'master']
SETTINGS = ['sqlite', 'sqlite_file', 'mysql_myisam', 'mysql_innodb',
'postgres']
DJANGO_REQUIREMENTS = {
'1.4': 'Django>=1.4,<1.5',
'1.5': 'Django>=1.5,<1.6',
'1.6': 'Django>=1.6,<1.7',
'1.7': 'Django>=1.7,<1.8',
'1.8': 'Django>=1.8,<1.9',
'1.9': 'Django==1.9a1',
'master': 'https://github.com/django/django/archive/master.tar.gz',
}
TOX_TESTENV_TEMPLATE = dedent("""
[testenv:%(testenv_name)s]
commands =
%(commands)s
basepython = %(python_version)s
deps =
%(deps)s
setenv =
PYTHONPATH = {toxinidir}
UID = %(uid)s
""")
def is_valid_env(env):
    # Stable database adapters for PyPy + Postgres/MySQL are hard to come by.
if env.is_pypy() and env.settings in ('postgres', 'mysql_myisam', 'mysql_innodb'):
return False
if env.is_py3():
# Django <1.5 does not support Python 3
if env.django_version == '1.4':
return False
# MySQL on Python 3 is not supported by Django
if env.settings in ('mysql_myisam', 'mysql_innodb'):
return False
# Django 1.7 dropped Python 2.6 support
if env.python_version == 'python2.6' and env.django_version in ('1.7', '1.8', '1.9', 'master'):
return False
# Django 1.9 dropped Python 3.2 and Python 3.3 support
if (env.python_version in ('python3.2', 'python3.3') and
env.django_version in ('1.7', '1.8', '1.9', 'master')):
return False
# Python 3.5 is only supported by Django 1.8+
if env.python_version == 'python3.5':
return env.django_version in ('1.8', '1.9', 'master')
# pypy3 is compatible with Python 3.2, but Django 1.9 only supports Python 2.7, 3.4+.
if env.python_version == 'pypy3' and env.django_version in ('1.9', 'master'):
return False
return True
def requirements(env):
yield 'pytest==%s' % (env.pytest_version)
yield 'pytest-xdist==1.13.1'
yield DJANGO_REQUIREMENTS[env.django_version]
yield 'django-configurations==0.8'
if env.is_py2():
yield 'south==1.0.2'
if env.settings == 'postgres':
yield 'psycopg2==2.6.1'
if env.settings in ('mysql_myisam', 'mysql_innodb'):
yield 'mysql-python==1.2.5'
def commands(uid, env):
# Django versions prior to 1.7 must have the production database available
# https://code.djangoproject.com/ticket/16969
db_name = 'pytest_django_%s' % uid
# The sh trickery always exits with 0
if env.settings in ('mysql_myisam', 'mysql_innodb'):
yield 'sh -c "mysql -u root -e \'drop database if exists %(name)s;' \
' create database %(name)s\'" || exit 0' % {'name': db_name}
if env.settings == 'postgres':
yield 'sh -c "dropdb %(name)s;' \
' createdb %(name)s || exit 0"' % {'name': db_name}
yield 'py.test --ds=pytest_django_test.settings_%s --strict -r fEsxXw {posargs:tests}' % env.settings
def testenv_name(env):
if len(PYTEST_VERSIONS) == 1:
env = [getattr(env, x) for x in env._fields if x != 'pytest_version']
return '-'.join(env)
def tox_testenv_config(uid, env):
cmds = '\n'.join(' %s' % r for r in commands(uid, env))
deps = '\n'.join(' %s' % r for r in requirements(env))
return TOX_TESTENV_TEMPLATE % {
'testenv_name': testenv_name(env),
'python_version': env.python_version,
'django_version': env.django_version,
'settings': env.settings,
'commands': cmds,
'deps': deps,
'uid': uid,
}
def generate_all_envs():
products = itertools.product(PYTHON_VERSIONS, PYTEST_VERSIONS,
DJANGO_VERSIONS, SETTINGS)
for (python_version, pytest_version, django_version, settings) \
in products:
env = TestEnv(python_version, pytest_version, django_version, settings)
if is_valid_env(env):
yield env
def generate_default_envs(envs):
"""
Returns a list of testenvs that include all different Python versions, all
Django versions and all database backends.
"""
result = set()
def find_and_add(variations, env_getter):
for variation in variations:
for existing in result:
if env_getter(existing) == variation:
break
else:
for env in reversed(envs):
if env_getter(env) == variation:
result.add(env)
break
# Add all Django versions for each main python version (2.x and 3.x).
find_and_add(itertools.product(PYTHON_MAIN_VERSIONS, DJANGO_VERSIONS),
lambda env: (env.python_version, env.django_version))
find_and_add(PYTHON_VERSIONS, lambda env: env.python_version)
find_and_add(PYTEST_VERSIONS, lambda env: env.pytest_version)
find_and_add(DJANGO_VERSIONS, lambda env: env.django_version)
find_and_add(SETTINGS, lambda env: env.settings)
return result
def make_tox_ini(envs, default_envs):
default_env_names = ([testenv_name(env) for env in default_envs] +
['checkqa-%s' % python_version for python_version in
PYTHON_MAIN_VERSIONS])
contents = [dedent('''
[tox]
envlist = %(active_envs)s
[testenv]
whitelist_externals =
sh
''' % {'active_envs': ','.join(default_env_names)}).lstrip()]
# Add checkqa-testenvs for different PYTHON_VERSIONS.
# flake8 is configured in setup.cfg.
idx = 0
for python_version in PYTHON_VERSIONS:
idx = idx + 1
contents.append(dedent("""
[testenv:checkqa-%(python_version)s]
commands =
flake8 --version
flake8 --show-source --statistics pytest_django tests
basepython = %(python_version)s
deps =
flake8
setenv =
UID = %(uid)s""" % {
'python_version': python_version,
'uid': idx,
}))
for env in envs:
idx = idx + 1
contents.append(tox_testenv_config(idx, env))
return '\n'.join(contents)
def make_travis_yml(envs):
contents = dedent("""
# Use container-based environment (faster startup, allows caches).
sudo: false
language: python
python:
- "%(RUN_PYTHON)s"
env:
%(testenvs)s
%(checkenvs)s
matrix:
allow_failures:
%(allow_failures)s
install:
# Create pip wrapper script, using travis_retry (a function) and
# inject it into tox.ini.
- mkdir -p bin
- PATH=$PWD/bin:$PATH
- printf '#!/bin/sh\\n' > bin/travis_retry_pip
- declare -f travis_retry >> bin/travis_retry_pip
- printf '\\necho "Using pip-wrapper.." >&2\\ntravis_retry pip "$@"' >> bin/travis_retry_pip
- chmod +x bin/travis_retry_pip
- sed -i.bak 's/^\[testenv\]/\\0\\ninstall_command = travis_retry_pip install {opts} {packages}/' tox.ini
- diff tox.ini tox.ini.bak && return 1 || true
- sed -i.bak 's/whitelist_externals =/\\0\\n travis_retry_pip/' tox.ini
- diff tox.ini tox.ini.bak && return 1 || true
- pip install tox
script: tox -e $TESTENV
""").strip("\n")
testenvs = '\n'.join(' - TESTENV=%s' % testenv_name(env) for env in envs)
checkenvs = '\n'.join(' - TESTENV=checkqa-%s' %
python for python in PYTHON_MAIN_VERSIONS)
allow_failures = '\n'.join(' - env: TESTENV=%s' %
testenv_name(env) for env in envs
if env.django_version == 'master')
return contents % {
'testenvs': testenvs,
'checkenvs': checkenvs,
'allow_failures': allow_failures,
'RUN_PYTHON': RUN_PYTHON,
}
def main():
all_envs = sorted(generate_all_envs())
default_envs = sorted(generate_default_envs(all_envs))
with open('tox.ini', 'w+') as tox_ini_file:
tox_ini_file.write(make_tox_ini(all_envs, default_envs))
with open('.travis.yml', 'w+') as travis_yml_file:
travis_yml_file.write(make_travis_yml(default_envs))
    print('tox.ini and .travis.yml have been generated!')
if __name__ == '__main__':
main()
|
|
import json
from ryu.base import app_manager
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from webob import Response
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.lib.packet import arp
from ryu.lib.packet import ether_types
from netaddr import IPNetwork, IPAddress
import pprint
from config import service_config, forwarding_config
from models import nat_settings
from helper import ofp_helper, nat_helper
from route import urls
IP_TO_MAC_TABLE = {}
# a.k.a. the ARP table
nat_instance_name = 'nat_instance_api_app'
class SNAT(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(SNAT, self).__init__(*args, **kwargs)
wsgi = kwargs['wsgi']
wsgi.register(SNATRest, {nat_instance_name: self})
self.ingress_table_id = service_config.service_sequence['nat_ingress']
self.egress_table_id = service_config.service_sequence['nat_egress']
self.forward_table_id = service_config.service_sequence['forwarding']
self.goto_table_priority = service_config.service_priority['goto_table']
self.service_priority = service_config.service_priority['nat']
settings = nat_settings.load()
self.wan_port = settings['wan_port']
self.public_ip = str(settings['public_ip'])
self.public_gateway = str(settings['public_gateway'])
self.public_ip_subnetwork = settings['public_ip_subnetwork']
self.private_gateway = settings['private_gateway']
self.private_subnetwork = settings['private_subnetwork']
self.mac_on_wan = settings['mac_on_wan']
self.mac_on_lan = settings['mac_on_lan']
self.IDLE_TIME = 100
self.port_counter = -1
self.ports_pool = range(2000, 65536)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# # Table Miss, forward packet to next table
# match = parser.OFPMatch()
# ofp_helper.add_flow_goto_next(datapath, table_id=self.egress_table_id,
# priority=self.goto_table_priority, match=match)
# ofp_helper.add_flow_goto_next(datapath, table_id=self.ingress_table_id,
# priority=self.goto_table_priority, match=match)
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
"""
Flow-Removed message.
When switch send flow-Removed message to controller,
controller will release tcp/udp port which is not in use,
putting it back to ports-pool.
"""
#print '[*] Flow-Removed EVENT'
msg = ev.msg
tcp_port = msg.match.get('tcp_dst')
udp_port = msg.match.get('udp_dst')
if tcp_port:
#print '[*] Available TCP port %d' % tcp_port
self.ports_pool.append(tcp_port)
# self.ports_pool.sort()
elif udp_port:
#print '[*] Available UDP port %d' % udp_port
self.ports_pool.append(udp_port)
# self.ports_pool.sort()
def _send_packet_to_port(self, datapath, port, data):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(port=port)]
out = parser.OFPPacketOut(datapath=datapath,
buffer_id=ofproto.OFP_NO_BUFFER,
in_port=ofproto.OFPP_CONTROLLER,
actions=actions,
data=data)
datapath.send_msg(out)
def _arp_request_handler(self, pkt_arp):
"""
Handle ARP request packets.
        When the controller gets an ARP request packet,
        it replies to whoever is asking for the NAT's MAC address
        (either on the NAT's LAN or its WAN side).
"""
data = None
if pkt_arp.opcode != arp.ARP_REQUEST:
            print '[WARNING] Wrong ARP opcode!'
return None
if pkt_arp.dst_ip == str(self.private_gateway):
            # What is the MAC address of the NAT's private IP? In other
            # words, what is the MAC address of the gateway of this private
            # network? Whoever asks must be on the LAN (a private network
            # host), so we reply with the fake MAC address defined in
            # `mac_on_lan`.
data = nat_helper.arp_reply(src_mac=self.mac_on_lan,
src_ip=str(self.private_gateway),
target_mac=pkt_arp.src_mac,
target_ip=pkt_arp.src_ip)
elif pkt_arp.dst_ip == self.public_ip:
            # What is the MAC address of the NAT's public IP?
            # Whoever asks must be on the WAN (an external network host),
            # so we reply with the fake MAC address defined in `mac_on_wan`.
data = nat_helper.arp_reply(src_mac=self.mac_on_wan,
src_ip=self.public_ip,
target_mac=pkt_arp.src_mac,
target_ip=pkt_arp.src_ip)
return data
def _arp_reply_handler(self, pkt_arp):
"""
Handle ARP reply packets.
        When the controller gets an ARP reply packet, it updates the ARP table.
"""
if pkt_arp.opcode != arp.ARP_REPLY:
            print '[WARNING] Wrong ARP opcode!'
return None
if pkt_arp.dst_ip == self.public_ip:
IP_TO_MAC_TABLE[pkt_arp.src_ip] = pkt_arp.src_mac
def _get_available_port(self):
"""
        Pops the next available port number from the pool.
"""
# self.port_counter += 1
p = self.ports_pool.pop(0)
return p
def _in_public_ip_subnetwork(self, ip):
ip = IPAddress(ip)
return ip in self.public_ip_subnetwork
def _in_private_subnetwork(self, ip):
ip = IPAddress(ip)
return ip in self.private_subnetwork
def _is_public(self, ip):
ip = IPAddress(ip)
return ip.is_unicast() and not ip.is_private()
def _private_to_public(self, datapath, buffer_id, data, in_port, out_port,
pkt_ip, pkt_ethernet, pkt_tcp=None, pkt_udp=None,
pkt_icmp=None):
if pkt_ip is None:
return
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
eth_dst = pkt_ethernet.dst
eth_src = pkt_ethernet.src
ipv4_src = pkt_ip.src
ipv4_dst = pkt_ip.dst
nat_port = self._get_available_port()
if (self._is_public(ipv4_dst) and not self._in_public_ip_subnetwork(ipv4_dst)):
target_ip = self.public_gateway
elif self._in_public_ip_subnetwork(ipv4_dst):
target_ip = ipv4_dst
        elif self._in_private_subnetwork(ipv4_dst):
            return
        else:
            # the destination matches no known network; nothing to translate
            return
if pkt_tcp:
# Install TCP Flow Entry
tcp_src = pkt_tcp.src_port
tcp_dst = pkt_tcp.dst_port
# egress
match = parser.OFPMatch(in_port=in_port,
eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_TCP,
ipv4_src=ipv4_src,
ipv4_dst=ipv4_dst,
tcp_src=tcp_src,
tcp_dst=tcp_dst)
actions = [parser.OFPActionSetField(eth_dst=IP_TO_MAC_TABLE[target_ip]),
                   parser.OFPActionSetField(eth_src=self.mac_on_wan),
parser.OFPActionSetField(ipv4_src=self.public_ip),
parser.OFPActionSetField(tcp_src=nat_port)]
forward_actions = [parser.OFPActionOutput(out_port)]
# ingress
match_back = parser.OFPMatch(in_port=out_port,
eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_TCP,
ipv4_src=ipv4_dst,
ipv4_dst=self.public_ip,
tcp_src=tcp_dst,
tcp_dst=nat_port)
actions_back = [parser.OFPActionSetField(eth_dst=eth_src),
parser.OFPActionSetField(ipv4_dst=ipv4_src),
parser.OFPActionSetField(tcp_dst=tcp_src)]
forward_match_back = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_TCP,
ipv4_src=ipv4_dst,
ipv4_dst=ipv4_src,
tcp_src=tcp_dst,
tcp_dst=tcp_src)
forward_actions_back = [parser.OFPActionOutput(in_port)]
elif pkt_udp:
# Install UDP Flow Entry
udp_src = pkt_udp.src_port
udp_dst = pkt_udp.dst_port
# egress, inside-to-outside
match = parser.OFPMatch(in_port=in_port,
eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_UDP,
ipv4_src=ipv4_src,
ipv4_dst=ipv4_dst,
udp_src=udp_src,
udp_dst=udp_dst)
actions = [parser.OFPActionSetField(eth_dst=IP_TO_MAC_TABLE[target_ip]),
                   parser.OFPActionSetField(eth_src=self.mac_on_wan),
parser.OFPActionSetField(ipv4_src=self.public_ip),
parser.OFPActionSetField(udp_src=nat_port)]
forward_actions = [parser.OFPActionOutput(out_port)]
# ingress, outside-to-inside
match_back = parser.OFPMatch(in_port=out_port,
eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_UDP,
ipv4_src=ipv4_dst,
ipv4_dst=self.public_ip,
udp_src=udp_dst,
udp_dst=nat_port)
actions_back = [parser.OFPActionSetField(eth_dst=eth_src),
parser.OFPActionSetField(ipv4_dst=ipv4_src),
parser.OFPActionSetField(udp_dst=udp_src)]
forward_match_back = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_UDP,
ipv4_src=ipv4_dst,
ipv4_dst=ipv4_src,
udp_src=udp_dst,
udp_dst=udp_src)
forward_actions_back = [parser.OFPActionOutput(in_port)]
        else:
            # neither TCP nor UDP: the match/action variables below would be
            # undefined, so there is nothing to translate
            return
# outside - inside set-filed (Table 0)
ofp_helper.add_flow_with_next(datapath, table_id=self.ingress_table_id,
priority=self.service_priority, match=match_back,
actions=actions_back, idle_timeout=self.IDLE_TIME)
# inside - outside go-to-next (Table 0)
# ofp_helper.add_flow_goto_next(datapath, table_id=self.ingress_table_id,
# priority=self.service_priority, match=match,
# idle_timeout=self.IDLE_TIME)
# outside - inside out-port (Table 3)
ofp_helper.add_flow(datapath, table_id=self.forward_table_id,
priority=self.service_priority, match=forward_match_back,
actions=forward_actions_back, idle_timeout=self.IDLE_TIME)
# inside - outside write-out-port(Table 3)
ofp_helper.add_write_flow_with_next(datapath, table_id=self.forward_table_id,
priority=self.service_priority, match=match,
actions=forward_actions, idle_timeout=self.IDLE_TIME)
# inside - outside set-field( Table 4)
ofp_helper.add_flow(datapath, table_id=self.egress_table_id,
priority=self.service_priority, match=match,
actions=actions, idle_timeout=self.IDLE_TIME)
actions.append(parser.OFPActionOutput(out_port))
# send first packet back to switch
d = None
if buffer_id == ofproto.OFP_NO_BUFFER:
d = data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=buffer_id,
in_port=in_port, actions=actions, data=d)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
if not service_config.service_status['nat']:
return
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
pkt_arp = pkt.get_protocol(arp.arp)
pkt_ip = pkt.get_protocol(ipv4.ipv4)
pkt_tcp = pkt.get_protocol(tcp.tcp)
pkt_udp = pkt.get_protocol(udp.udp)
# if IP_TO_MAC_TABLE:
# print IP_TO_MAC_TABLE
if in_port == self.wan_port:
# Packets from WAN port
if pkt_arp:
if pkt_arp.opcode == arp.ARP_REQUEST:
arp_reply_pkt = self._arp_request_handler(pkt_arp)
if arp_reply_pkt is not None:
self._send_packet_to_port(datapath, in_port, arp_reply_pkt)
else:
                        # no ARP reply was generated, so there is nothing to send
pass
elif pkt_arp.opcode == arp.ARP_REPLY:
self._arp_reply_handler(pkt_arp)
else:
# DNAT Part
pass
else:
# Packets from LAN port
if pkt_ip:
if (self._in_private_subnetwork(pkt_ip.dst) and
pkt_ip.dst != str(self.private_gateway)):
# These packets are just in private network
# l2switch will handle it
return
ip_dst = pkt_ip.dst
if (self._is_public(ip_dst) and not
self._in_public_ip_subnetwork(ip_dst)):
                    # the packet's ip_dst is a public IP out on the Internet
target_ip = self.public_gateway
elif self._in_public_ip_subnetwork(ip_dst):
                    # the packet's ip_dst is inside the NAT's public subnetwork
target_ip = ip_dst
else:
return
# Sending ARP request to Gateway
arp_req_pkt = nat_helper.broadcast_arp_request(src_mac=self.mac_on_wan,
src_ip=self.public_ip,
target_ip=target_ip)
self._send_packet_to_port(datapath, self.wan_port, arp_req_pkt)
if pkt_tcp:
if target_ip in IP_TO_MAC_TABLE:
self._private_to_public(datapath=datapath,
buffer_id=msg.buffer_id,
data=msg.data,
in_port=in_port,
out_port=self.wan_port,
pkt_ethernet=pkt_ethernet,
pkt_ip=pkt_ip,
pkt_tcp=pkt_tcp)
elif pkt_udp:
if target_ip in IP_TO_MAC_TABLE:
self._private_to_public(datapath=datapath,
buffer_id=msg.buffer_id,
data=msg.data,
in_port=in_port,
out_port=self.wan_port,
pkt_ethernet=pkt_ethernet,
pkt_ip=pkt_ip,
pkt_udp=pkt_udp)
elif pkt_arp:
if pkt_arp.opcode == arp.ARP_REQUEST:
arp_reply_pkt = self._arp_request_handler(pkt_arp)
self._send_packet_to_port(datapath, in_port, arp_reply_pkt)
elif pkt_arp.opcode == arp.ARP_REPLY:
pass
class SNATRest(ControllerBase):
def __init__(self, req, link, data, **config):
super(SNATRest, self).__init__(req, link, data, **config)
self.snat_app = data[nat_instance_name]
@route('post_nat_config_init', urls.post_nat_config_init, methods=['POST'])
def nat_config_init(self, req, **kwargs):
save_dict = {}
save_dict['wan_port'] = 1
save_dict['public_ip'] = IPAddress('140.114.71.178')
save_dict['public_gateway'] = IPAddress('140.114.71.254')
save_dict['public_ip_subnetwork'] = IPNetwork('140.114.71.0/24')
network = '192.168.8.0/24'
save_dict['private_subnetwork'] = IPNetwork(network)
save_dict['private_gateway'] = IPNetwork(network)[1]
save_dict['broadcast_ip'] = IPAddress('192.168.8.255')
save_dict['dns_ip'] = IPAddress('8.8.8.8')
save_dict['mac_on_dhcp'] = '08:00:27:b8:0f:8d'
save_dict['mac_on_wan'] = '00:0e:c6:87:a6:fb'
save_dict['mac_on_lan'] = '00:0e:c6:87:a6:fa'
if nat_settings.save(save_dict):
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(save_dict)
return Response(status=200)
else:
return Response(status=400)
@route('put_nat_config_save', urls.put_nat_config_save, methods=['PUT'])
def nat_config_save(self, req, **kwargs):
json_body = json.loads(req.body)
save_dict = nat_settings.load()
if save_dict is None:
save_dict = {}
save_dict['wan_port'] = json_body.get('wanPort')
save_dict['public_ip'] = IPAddress(json_body.get('publicIP'))
public_gateway = json_body.get('publicGateway')
save_dict['public_gateway'] = IPAddress(public_gateway)
save_dict['public_ip_subnetwork'] = IPNetwork(public_gateway + '/24')
net = json_body.get('privateNetwork') + '/24'
save_dict['private_subnetwork'] = IPNetwork(net)
save_dict['private_gateway'] = IPNetwork(net)[1]
save_dict['broadcast_ip'] = IPNetwork(net)[255]
save_dict['dns_ip'] = IPAddress('8.8.8.8')
save_dict['mac_on_dhcp'] = '08:00:27:b8:0f:8d'
save_dict['mac_on_wan'] = '00:0e:c6:87:a6:fb'
save_dict['mac_on_lan'] = '00:0e:c6:87:a6:fa'
if nat_settings.save(save_dict):
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(save_dict)
return Response(status=200)
else:
return Response(status=400)
@route('get_nat_config', urls.get_nat_config, methods=['GET'])
def nat_config_get(self, req, **kwargs):
settings = nat_settings.load()
dic = {}
# local network
ip = settings['private_subnetwork'].ip
mask = settings['private_subnetwork'].netmask
mask_len = mask.bits().replace('.', '').find('0')
dic['privateNetwork'] = str(ip) + '/' + str(mask_len)
dic['wanPort'] = settings['wan_port']
dic['publicGateway'] = str(settings['public_gateway'])
dic['publicIP'] = str(settings['public_ip'])
body = json.dumps(dic)
return Response(status=200, content_type='application/json', body=body)
@route('get_dhcp_config', urls.get_dhcp_config, methods=['GET'])
def dhcp_config_get(self, req, **kwargs):
dhcp_settings = nat_settings.load()
dic = {}
dic['privateGateway'] = str(dhcp_settings['private_gateway'])
dic['broadcastIP'] = str(dhcp_settings['broadcast_ip'])
dic['dnsIP'] = str(dhcp_settings['dns_ip'])
dic['macDhcp'] = str(dhcp_settings['mac_on_dhcp'])
body = json.dumps(dic)
return Response(status=200, content_type='application/json', body=body)
|
|
from __future__ import unicode_literals
import copy
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, six.string_types):
split = model.split('.', 1)
return (tuple(split) if len(split) == 2 else (app_label, split[0]))
else:
return model._meta.app_label, model._meta.model_name
def _get_related_models(m):
"""
Return all models that have a direct relationship to the given model.
"""
related_models = [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
related_fields_models = set()
for f in m._meta.get_fields(include_parents=True, include_hidden=True):
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, six.string_types):
related_fields_models.add(f.model)
related_models.append(f.related_model)
# Reverse accessors of foreign keys to proxy models are attached to their
# concrete proxied model.
opts = m._meta
if opts.proxy and m in related_fields_models:
related_models.append(opts.concrete_model)
return related_models
def get_related_models_recursive(model):
"""
Return all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
seen = set()
queue = _get_related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_get_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def reload_model(self, app_label, model_name):
if 'apps' in self.__dict__: # hasattr would cache the property
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
related_models = set()
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for name, field in model_state.fields:
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all models states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"Returns an exact copy of this ProjectState"
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
return new_state
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
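# Illustrative usage sketch (assumes a configured Django project):
#
#   >>> state = ProjectState.from_apps(global_apps)  # doctest: +SKIP
#   >>> state.clone() == state                       # doctest: +SKIP
#   True
#
# from_apps() snapshots every installed model as a ModelState; clone()
# copies that snapshot so the migration machinery can mutate it freely
# without touching the original state.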
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super(AppConfigStub, self).__init__(label, None)
def import_models(self):
self.models = self.apps.all_models[self.label]
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
super(StateApps, self).__init__(app_configs)
# The lock gets in the way of copying as implemented in clone(), which
# is called whenever Django duplicates a StateApps before updating it.
self._lock = None
self.render_multiple(list(models.values()) + self.real_models)
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
if not model_states:
return
# Avoid expiring all the model caches for each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""
Return a clone of this registry, mainly used by the migration framework.
"""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# Set the pointer to the correct app registry.
for app_config in clone.app_configs.values():
app_config.apps = clone
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models = OrderedDict()
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
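# Illustrative usage sketch (assumes `state` is a ProjectState as above):
#
#   >>> apps = state.apps                            # doctest: +SKIP
#   >>> apps.get_model('library', 'book')            # doctest: +SKIP
#   <class '__fake__.Book'>
#
# StateApps renders ModelStates into model classes living in the fake
# '__fake__' module; bulk_update() batches register/unregister calls so the
# registry caches are only cleared once at the end.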
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.options.setdefault('indexes', [])
self.bases = bases or (models.Model, )
self.managers = managers or []
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
for name, field in fields:
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
# Sanity-check that indexes have their name set.
for index in self.options['indexes']:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, OrderWrt):
continue
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
# Force-convert all options to text_type (#23226)
options = cls.force_text_recursive(options)
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
del options['order_with_respect_to']
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
manager_name = force_text(manager.name)
if manager_name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager_name)
managers.append((manager_name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [('objects', default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
@classmethod
def force_text_recursive(cls, value):
if isinstance(value, six.string_types):
return force_text(value)
elif isinstance(value, list):
return [cls.force_text_recursive(x) for x in value]
elif isinstance(value, tuple):
return tuple(cls.force_text_recursive(x) for x in value)
elif isinstance(value, set):
return set(cls.force_text_recursive(x) for x in value)
elif isinstance(value, dict):
return {
cls.force_text_recursive(k): cls.force_text_recursive(v)
for k, v in value.items()
}
return value
def construct_managers(self):
"Deep-clone the managers using deconstruction"
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
mgr_name = force_text(mgr_name)
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"Returns an exact copy of this ModelState"
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = {name: field.clone() for name, field in self.fields}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Managers from concrete parents will soon qualify as default managers",
RemovedInDjango20Warning)
# Then, make a Model object (apps.register_model is called in __new__)
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def get_index_by_name(self, name):
for index in self.options['indexes']:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def __repr__(self):
return "<ModelState: '%s.%s'>" % (self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
def __ne__(self, other):
return not (self == other)
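# Illustrative usage sketch (hypothetical app label and field):
#
#   >>> state = ProjectState()                       # doctest: +SKIP
#   >>> state.add_model(ModelState('library', 'Book', [
#   ...     ('title', models.CharField(max_length=100)),
#   ... ]))
#   >>> state.apps.get_model('library', 'Book')      # doctest: +SKIP
#   <class '__fake__.Book'>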
#!/usr/bin/env python
import base64
import re
import logging
import urllib
import time
from hashlib import sha1
from urlparse import parse_qs
from Cookie import SimpleCookie
from saml2 import server
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_URI
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import time_util
from saml2.httputil import Response, NotFound
from saml2.httputil import get_post
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.ident import Unknown
from saml2.s_utils import rndstr, UnknownPrincipal, UnsupportedBinding
from saml2.s_utils import PolicyError
from saml2.saml import AUTHN_PASSWORD
logger = logging.getLogger("saml2.idp")
def _expiration(timeout, format="%a, %d-%b-%Y %H:%M:%S GMT"):
if timeout == "now":
return time_util.instant(format)
elif timeout == "dawn":
return time.strftime(format, time.gmtime(0))
else:
# validity time should match lifetime of assertions
return time_util.in_a_while(minutes=timeout, format=format)
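# Illustrative sketch of the three accepted timeout forms:
#
#   >>> _expiration("now")    # doctest: +SKIP  -> the current instant
#   >>> _expiration("dawn")   # doctest: +SKIP  -> 'Thu, 01-Jan-1970 00:00:00 GMT'
#   >>> _expiration(5)        # doctest: +SKIP  -> five minutes from now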
# -----------------------------------------------------------------------------
def dict_to_table(ava, lev=0, width=1):
txt = ['<table border=%s bordercolor="black">\n' % width]
for prop, valarr in ava.items():
txt.append("<tr>\n")
if isinstance(valarr, basestring):
txt.append("<th>%s</th>\n" % str(prop))
try:
txt.append("<td>%s</td>\n" % valarr.encode("utf8"))
except AttributeError:
txt.append("<td>%s</td>\n" % valarr)
elif isinstance(valarr, list):
index = 0
num = len(valarr)
for val in valarr:
if not index:
txt.append("<th rowspan=%d>%s</td>\n" % (len(valarr), prop))
else:
txt.append("<tr>\n")
if isinstance(val, dict):
txt.append("<td>\n")
txt.extend(dict_to_table(val, lev+1, width-1))
txt.append("</td>\n")
else:
try:
txt.append("<td>%s</td>\n" % val.encode("utf8"))
except AttributeError:
txt.append("<td>%s</td>\n" % val)
if num > 1:
txt.append("</tr>\n")
num -= 1
index += 1
elif isinstance(valarr, dict):
txt.append("<th>%s</th>\n" % prop)
txt.append("<td>\n")
txt.extend(dict_to_table(valarr, lev+1, width-1))
txt.append("</td>\n")
txt.append("</tr>\n")
txt.append('</table>\n')
return txt
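# Illustrative sketch (hypothetical attribute dict): dict_to_table() renders
# an identity such as
#
#   >>> "".join(dict_to_table({"uid": ["babs"]}))    # doctest: +SKIP
#
# into an HTML <table> fragment, one header cell per attribute name and one
# data cell per value; nested dicts become nested tables.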
def unpack_redirect(environ):
if "QUERY_STRING" in environ:
_qs = environ["QUERY_STRING"]
return dict([(k,v[0]) for k,v in parse_qs(_qs).items()])
else:
return None
def unpack_post(environ):
try:
return dict([(k, v[0]) for k, v in parse_qs(get_post(environ)).items()])
except Exception:
return None
def unpack_soap(environ):
try:
query = get_post(environ)
return {"SAMLRequest": query, "RelayState": ""}
except Exception:
return None
def unpack_artifact(environ):
if environ["REQUEST_METHOD"] == "GET":
_dict = unpack_redirect(environ)
elif environ["REQUEST_METHOD"] == "POST":
_dict = unpack_post(environ)
else:
_dict = None
return _dict
def dict2list_of_tuples(d):
return [(k,v) for k,v in d.items()]
# -----------------------------------------------------------------------------
def _operation(environ, start_response, user, _dict, func, binding,
**kwargs):
logger.debug("_operation: %s" % _dict)
if not _dict:
resp = BadRequest('Error parsing request or no request')
return resp(environ, start_response)
else:
return func(environ, start_response, user, _dict["SAMLRequest"],
binding, _dict["RelayState"], **kwargs)
def _artifact_oper(environ, start_response, user, _dict, func):
if not _dict:
resp = BadRequest("Missing query")
return resp(environ, start_response)
else:
# exchange artifact for request
request = IDP.artifact2message(_dict["SAMLart"], "spsso")
return func(environ, start_response, user, request,
BINDING_HTTP_ARTIFACT, _dict["RelayState"])
def _response(environ, start_response, binding, http_args):
if binding == BINDING_HTTP_ARTIFACT:
resp = Redirect()
else:
resp = Response(http_args["data"], headers=http_args["headers"])
return resp(environ, start_response)
# -----------------------------------------------------------------------------
AUTHN = (AUTHN_PASSWORD, "http://lingon.catalogix.se/login")
REPOZE_ID_EQUIVALENT = "uid"
FORM_SPEC = """<form name="myform" method="post" action="%s">
<input type="hidden" name="SAMLResponse" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>"""
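# Illustrative sketch: with the HTTP-POST binding, FORM_SPEC is interpolated
# with (action URL, base64-encoded SAMLResponse, relay state); the values
# below are hypothetical placeholders.
#
#   >>> FORM_SPEC % ("https://sp.example.org/acs",   # doctest: +SKIP
#   ...              "PHNhbWxwOlJlc3BvbnNlIC4uLg==", "some_relay_state")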
# -----------------------------------------------------------------------------
# === Single log in ====
# -----------------------------------------------------------------------------
def _sso(environ, start_response, user, query, binding, relay_state="",
response_bindings=None):
logger.info("--- In SSO ---")
logger.debug("user: %s" % user)
if not query:
logger.info("Missing QUERY")
resp = Unauthorized('Unknown user')
return resp(environ, start_response)
# base 64 encoded request
req_info = IDP.parse_authn_request(query, binding=binding)
logger.info("parsed OK")
logger.info("%s" % req_info)
_authn_req = req_info.message
try:
resp_args = IDP.response_args(_authn_req)
except UnknownPrincipal, excp:
#IDP.create_error_response()
resp = ServiceError("UnknownPrincipal: %s" % (excp,))
return resp(environ, start_response)
except UnsupportedBinding, excp:
#IDP.create_error_response()
resp = ServiceError("UnsupportedBinding: %s" % (excp,))
return resp(environ, start_response)
identity = USERS[user]
logger.info("Identity: %s" % (identity,))
if REPOZE_ID_EQUIVALENT:
identity[REPOZE_ID_EQUIVALENT] = user
try:
authn_resp = IDP.create_authn_response(identity, userid=user,
authn=AUTHN, **resp_args)
except Exception, excp:
logger.error("Exception: %s" % (excp,))
resp = ServiceError("Exception: %s" % (excp,))
return resp(environ, start_response)
logger.info("AuthNResponse: %s" % authn_resp)
binding, destination = IDP.pick_binding("assertion_consumer_service",
bindings=response_bindings,
entity_id=_authn_req.issuer.text)
logger.debug("Binding: %s, destination: %s" % (binding, destination))
http_args = IDP.apply_binding(binding, "%s" % authn_resp, destination,
relay_state, response=True)
return _response(environ, start_response, binding, http_args)
def sso(environ, start_response, user):
""" This is the HTTP-redirect endpoint """
_dict = unpack_redirect(environ)
logger.debug("_dict: %s" % _dict)
# pick up the stored original query
logger.debug("keys: %s" % IDP.ticket.keys())
_req = IDP.ticket[_dict["key"]]
del IDP.ticket[_dict["key"]]
return _operation(environ, start_response, user, _req, _sso,
BINDING_HTTP_REDIRECT)
def sso_post(environ, start_response, user):
"""
The HTTP-Post endpoint
"""
logger.info("--- In SSO POST ---")
logger.debug("user: %s" % user)
_dict = unpack_post(environ)
logger.debug("message: %s" % _dict)
logger.debug("keys: %s" % IDP.ticket.keys())
_request = IDP.ticket[_dict["key"]]
del IDP.ticket[_dict["key"]]
return _operation(environ, start_response, user, _request, _sso,
BINDING_HTTP_POST)
def sso_art(environ, start_response, user):
# Can be either by HTTP_Redirect or HTTP_POST
_dict = unpack_artifact(environ)
_request = IDP.ticket[_dict["key"]]
del IDP.ticket[_dict["key"]]
return _artifact_oper(environ, start_response, user, _request, _sso)
def sso_ecp(environ, start_response, user):
# The ECP interface
logger.info("--- ECP SSO ---")
logger.debug("ENVIRON: %s" % environ)
resp = None
try:
authz_info = environ["HTTP_AUTHORIZATION"]
if authz_info.startswith("Basic "):
_info = base64.b64decode(authz_info[6:])
logger.debug("Authz_info: %s" % _info)
try:
(user, passwd) = _info.split(":", 1)  # the password may itself contain ':'
if PASSWD[user] != passwd:
resp = Unauthorized()
except ValueError:
resp = Unauthorized()
else:
resp = Unauthorized()
except KeyError:
resp = Unauthorized()
if resp:
return resp(environ, start_response)
_dict = unpack_soap(environ)
# Basic auth ?!
return _operation(environ, start_response, user, _dict, _sso, BINDING_SOAP,
response_bindings=[BINDING_PAOS])
# -----------------------------------------------------------------------------
# === Authentication ====
# -----------------------------------------------------------------------------
def not_authn(environ, start_response):
# store the request and redirect to login page
logger.info("not_authn ENV: %s" % environ)
loc = "http://%s/login" % (environ["HTTP_HOST"])
if environ["REQUEST_METHOD"] == "GET":
_dict = unpack_redirect(environ)
elif environ["REQUEST_METHOD"] == "POST":
_dict = unpack_post(environ)
else:
_dict = None
if not _dict:
resp = BadRequest("Missing query")
else:
logger.info("query: %s" % _dict)
# store the original request
key = sha1("%s" % _dict).hexdigest()
IDP.ticket[str(key)] = _dict
loc += "?%s" % urllib.urlencode({"came_from": environ["PATH_INFO"],
"key": key})
headers = [('Content-Type', 'text/plain')]
logger.debug("location: %s" % loc)
logger.debug("headers: %s" % headers)
resp = Redirect(loc, headers=headers)
return resp(environ, start_response)
def do_authentication(environ, start_response, cookie=None):
"""
Display the login form
"""
query = parse_qs(environ["QUERY_STRING"])
logger.info("The login page")
if cookie:
headers = [cookie]
else:
headers = []
resp = Response(mako_template="login.mako", template_lookup=LOOKUP,
headers=headers)
argv = {
"action": "/verify",
"came_from": query["came_from"][0],
"login": "",
"password": "",
"key": query["key"][0]
}
logger.info("do_authentication argv: %s" % argv)
return resp(environ, start_response, **argv)
def verify_username_and_password(dic):
global PASSWD
# verify username and password
if PASSWD[dic["login"][0]] == dic["password"][0]:
return True, dic["login"][0]
else:
return False, ""
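# Illustrative sketch: the dict comes straight from parse_qs, so every value
# is a list; a successful check returns (True, username).
#
#   >>> verify_username_and_password({"login": ["roland"],
#   ...                               "password": ["dianakra"]})  # doctest: +SKIP
#   (True, 'roland')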
def do_verify(environ, start_response, _user):
query = parse_qs(get_post(environ))
logger.debug("do_verify: %s" % query)
_ok, user = verify_username_and_password(query)
if not _ok:
resp = Unauthorized("Unknown user or wrong password")
else:
uid = rndstr()  # renamed from `id` to avoid shadowing the builtin
IDP.authn[uid] = user
logger.debug("Register %s under '%s'" % (user, uid))
kaka = set_cookie("idpauthn", "/", uid)
lox = "http://%s%s?id=%s&key=%s" % (environ["HTTP_HOST"],
query["came_from"][0], uid,
query["key"][0])
logger.debug("Redirect => %s" % lox)
resp = Redirect(lox, headers=[kaka], content="text/html")
return resp(environ, start_response)
# -----------------------------------------------------------------------------
# === Single log out ===
# -----------------------------------------------------------------------------
#def _subject_sp_info(req_info):
# # look for the subject
# subject = req_info.subject_id()
# subject = subject.text.strip()
# sp_entity_id = req_info.message.issuer.text.strip()
# return subject, sp_entity_id
def _slo(environ, start_response, _, request, binding, relay_state=""):
logger.info("--- Single Log Out Service ---")
try:
req_info = IDP.parse_logout_request(request, binding)
except Exception, exc:
logger.error("Bad request: %s" % exc)
resp = BadRequest("%s" % exc)
return resp(environ, start_response)
msg = req_info.message
if msg.name_id:
lid = IDP.ident.find_local_id(msg.name_id)
logger.info("local identifier: %s" % lid)
# remove the authentication
try:
IDP.remove_authn_statements(msg.name_id)
except KeyError,exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(environ, start_response)
resp = IDP.create_logout_response(msg)
try:
hinfo = IDP.apply_binding(binding, "%s" % resp, "", relay_state)
except Exception, exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(environ, start_response)
logger.info("Header: %s" % (hinfo["headers"],))
#_tlh = dict2list_of_tuples(hinfo["headers"])
delco = delete_cookie(environ, "idpauthn")
if delco:
hinfo["headers"].append(delco)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
# -- bindings --
def slo(environ, start_response, user):
""" Expects a HTTP-redirect logout request """
_dict = unpack_redirect(environ)
return _operation(environ, start_response, user, _dict, _slo,
BINDING_HTTP_REDIRECT)
def slo_post(environ, start_response, user):
""" Expects a HTTP-POST logout request """
_dict = unpack_post(environ)
return _operation(environ, start_response, user, _dict, _slo,
BINDING_HTTP_POST)
def slo_art(environ, start_response, user):
# Can be either by HTTP_Redirect or HTTP_POST
_dict = unpack_artifact(environ)
return _artifact_oper(environ, start_response, user, _dict, _slo)
def slo_soap(environ, start_response, user=None):
"""
Single log out using HTTP_SOAP binding
"""
_dict = unpack_soap(environ)
return _operation(environ, start_response, user, _dict, _slo,
BINDING_SOAP)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def whoami(environ, start_response, user):
start_response('200 OK', [('Content-Type', 'text/html')])
identity = USERS[user].copy()
for prop in ["login", "password"]:
try:
del identity[prop]
except KeyError:
continue
response = dict_to_table(identity)
return response[:]
def not_found(environ, start_response):
"""Called if no URL matches."""
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
return ['Not Found']
PASSWD = {"roland": "dianakra",
"babs": "howes",
"upper": "crust"}
# ----------------------------------------------------------------------------
def kaka2user(kaka):
logger.debug("KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get("idpauthn", None)
if morsel:
return IDP.authn[morsel.value]
else:
logger.debug("No 'idpauthn' value in cookie")
return None
# ----------------------------------------------------------------------------
# Manage Name ID service
# ----------------------------------------------------------------------------
def _mni(environ, start_response, user, query, binding, relay_state=""):
logger.info("--- Manage Name ID Service ---")
req = IDP.parse_manage_name_id_request(query, binding)
request = req.message
# Do the necessary stuff
name_id = IDP.ident.handle_manage_name_id_request(request.name_id,
request.new_id,
request.new_encrypted_id,
request.terminate)
logger.debug("New NameID: %s" % name_id)
_resp = IDP.create_manage_name_id_response(request)
# It's using SOAP binding
hinfo = IDP.apply_binding(binding, "%s" % _resp, "", relay_state,
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
def mni(environ, start_response, user):
""" Expects a HTTP-redirect logout request """
_dict = unpack_redirect(environ)
return _operation(environ, start_response, user, _dict, _mni,
BINDING_HTTP_REDIRECT)
def mni_post(environ, start_response, user):
""" Expects a HTTP-POST logout request """
_dict = unpack_post(environ)
return _operation(environ, start_response, user, _dict, _mni,
BINDING_HTTP_POST)
def mni_soap(environ, start_response, user):
_dict = unpack_soap(environ)
return _operation(environ, start_response, user, _dict, _mni,
BINDING_SOAP)
def mni_art(environ, start_response, user):
# Could be by HTTP_REDIRECT or HTTP_POST
_dict = unpack_artifact(environ)
return _artifact_oper(environ, start_response, user, _dict, _mni)
# ----------------------------------------------------------------------------
# === Assertion ID request ===
# ----------------------------------------------------------------------------
# Only URI binding
def assertion_id_request(environ, start_response, user=None):
logger.info("--- Assertion ID Service ---")
_binding = BINDING_URI
_dict = unpack_artifact(environ)
logger.debug("INPUT: %s" % _dict)
# Presently only HTTP GET is supported
if "ID" in _dict:
aid = _dict["ID"]
else:
resp = BadRequest("Missing or faulty request")
return resp(environ, start_response)
try:
assertion = IDP.create_assertion_id_request_response(aid)
except Unknown:
resp = NotFound(aid)
return resp(environ, start_response)
hinfo = IDP.apply_binding(_binding, "%s" % assertion, response=True)
logger.debug("HINFO: %s" % hinfo)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
# ----------------------------------------------------------------------------
# === Artifact resolve service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
def artifact_resolve_service(environ, start_response, user=None):
"""
:param environ: Execution environment
:param start_response: Function to start the response with
"""
logger.info("--- Artifact resolve Service ---")
_dict = unpack_soap(environ)
_binding = BINDING_SOAP
if not _dict:
resp = BadRequest("Missing or faulty request")
return resp(environ, start_response)
_req = IDP.parse_artifact_resolve("%s" % _dict["SAMLRequest"], _binding)
msg = IDP.create_artifact_response(_req, _req.artifact.text)
hinfo = IDP.apply_binding(_binding, "%s" % msg, "","",response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
# ----------------------------------------------------------------------------
# === Authn query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
def authn_query_service(environ, start_response, user=None):
"""
:param environ: Execution environment
:param start_response: Function to start the response with
"""
logger.info("--- Authn Query Service ---")
_dict = unpack_soap(environ)
_binding = BINDING_SOAP
if not _dict:
resp = BadRequest("Missing or faulty request")
return resp(environ, start_response)
_req = IDP.parse_authn_query("%s" % _dict["SAMLRequest"], _binding)
_query = _req.message
msg = IDP.create_authn_query_response(_query.subject,
_query.requested_authn_context,
_query.session_index)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(_binding, "%s" % msg, "","",response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
# ----------------------------------------------------------------------------
# === Attribute query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
def attribute_query_service(environ, start_response, user=None):
"""
:param environ: Execution environment
:param start_response: Function to start the response with
"""
logger.info("--- Attribute Query Service ---")
_dict = unpack_soap(environ)
_binding = BINDING_SOAP
if not _dict:
resp = BadRequest("Missing or faulty request")
return resp(environ, start_response)
_req = IDP.parse_attribute_query("%s" % _dict["SAMLRequest"], _binding)
_query = _req.message
name_id = _query.subject.name_id
uid = IDP.ident.find_local_id(name_id)
logger.debug("Local uid: %s" % uid)
identity = EXTRA[uid]
# Comes in over SOAP so only need to construct the response
args = IDP.response_args(_query, [BINDING_SOAP])
msg = IDP.create_attribute_response(identity, destination="",
name_id=name_id, **args)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(_binding, "%s" % msg, "","",response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
# ----------------------------------------------------------------------------
# Name ID Mapping service
# When an entity that shares an identifier for a principal with an identity
# provider wishes to obtain a name identifier for the same principal in a
# particular format or federation namespace, it can send a request to
# the identity provider using this protocol.
# ----------------------------------------------------------------------------
def _nim(environ, start_response, user, query, binding, relay_state=""):
req = IDP.parse_name_id_mapping_request(query, binding)
request = req.message
# Do the necessary stuff
try:
name_id = IDP.ident.handle_name_id_mapping_request(request.name_id,
request.name_id_policy)
except Unknown:
resp = BadRequest("Unknown entity")
return resp(environ, start_response)
except PolicyError:
resp = BadRequest("Unknown entity")
return resp(environ, start_response)
info = IDP.response_args(request)
_resp = IDP.create_name_id_mapping_response(name_id, **info)
# Only SOAP
hinfo = IDP.apply_binding(binding, "%s" % _resp, "", "", response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(environ, start_response)
def nim_soap(environ, start_response, user):
_dict = unpack_soap(environ)
return _operation(environ, start_response, user, _dict, _nim, BINDING_SOAP)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def delete_cookie(environ, name):
kaka = environ.get("HTTP_COOKIE", '')
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get(name, None)
cookie = SimpleCookie()
cookie[name] = ""
cookie[name]['path'] = "/"
logger.debug("Expire: %s" % morsel)
cookie[name]["expires"] = _expiration("dawn")
return tuple(cookie.output().split(": ", 1))
return None
def set_cookie(name, path, value):
cookie = SimpleCookie()
cookie[name] = value
cookie[name]['path'] = "/"
cookie[name]["expires"] = _expiration(5) # 5 minutes from now
logger.debug("Cookie expires: %s" % cookie[name]["expires"])
return tuple(cookie.output().split(": ", 1))
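# Illustrative sketch: both cookie helpers return a ("Set-Cookie", value)
# header tuple ready to append to a WSGI response ("abc123" is a
# hypothetical session id):
#
#   >>> set_cookie("idpauthn", "/", "abc123")        # doctest: +SKIP
#   ('Set-Cookie', 'idpauthn=abc123; expires=...; Path=/')
#
# delete_cookie() emits the same header with an epoch expiry so the browser
# drops the cookie.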
# ----------------------------------------------------------------------------
# map urls to functions
AUTHN_URLS = [
(r'whoami$', whoami),
(r'whoami/(.*)$', whoami),
# sso
(r'sso/post$', sso_post),
(r'sso/post/(.*)$', sso_post),
(r'sso/redirect$', sso),
(r'sso/redirect/(.*)$', sso),
(r'sso/art$', sso_art),
(r'sso/art/(.*)$', sso_art),
# slo
(r'slo/redirect$', slo),
(r'slo/redirect/(.*)$', slo),
(r'slo/post$', slo_post),
(r'slo/post/(.*)$', slo_post),
(r'slo/soap$', slo_soap),
(r'slo/soap/(.*)$', slo_soap),
#
(r'airs$', assertion_id_request),
(r'ars$', artifact_resolve_service),
# mni
(r'mni/post$', mni_post),
(r'mni/post/(.*)$', mni_post),
(r'mni/redirect$', mni),
(r'mni/redirect/(.*)$', mni),
(r'mni/art$', mni_art),
(r'mni/art/(.*)$', mni_art),
(r'mni/soap$', mni_soap),
(r'mni/soap/(.*)$', mni_soap),
# nim
(r'nim$', nim_soap),
(r'nim/(.*)$', nim_soap),
#
(r'aqs$', authn_query_service),
(r'attr$', attribute_query_service)
]
NON_AUTHN_URLS = [
(r'login?(.*)$', do_authentication),
(r'verify?(.*)$', do_verify),
(r'sso/ecp$', sso_ecp),
]
# ----------------------------------------------------------------------------
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get('PATH_INFO', '').lstrip('/')
kaka = environ.get("HTTP_COOKIE", None)
logger.info("<application> PATH: %s" % path)
if kaka:
logger.info("= KAKA =")
user = kaka2user(kaka)
else:
try:
query = parse_qs(environ["QUERY_STRING"])
logger.debug("QUERY: %s" % query)
user = IDP.authn[query["id"][0]]
except KeyError:
user = None
if not user:
logger.info("-- No USER --")
for regex, callback in NON_AUTHN_URLS:
match = re.search(regex, path)
if match is not None:
try:
environ['myapp.url_args'] = match.groups()[0]
except IndexError:
environ['myapp.url_args'] = path
logger.info("callback: %s" % (callback,))
return callback(environ, start_response, user)
for regex, callback in AUTHN_URLS:
match = re.search(regex, path)
if match is not None:
return not_authn(environ, start_response)
else:
for regex, callback in AUTHN_URLS:
match = re.search(regex, path)
if match is not None:
try:
environ['myapp.url_args'] = match.groups()[0]
except IndexError:
environ['myapp.url_args'] = path
logger.info("callback: %s" % (callback,))
return callback(environ, start_response, user)
return not_found(environ, start_response)
# ----------------------------------------------------------------------------
from mako.lookup import TemplateLookup
ROOT = './'
LOOKUP = TemplateLookup(directories=[ROOT + 'templates', ROOT + 'htdocs'],
module_directory=ROOT + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
# ----------------------------------------------------------------------------
if __name__ == '__main__':
import sys
from idp_user import USERS
from idp_user import EXTRA
from wsgiref.simple_server import make_server
PORT = 8088
IDP = server.Server(sys.argv[1])
IDP.ticket = {}
SRV = make_server('', PORT, application)
print "IdP listening on port: %s" % PORT
SRV.serve_forever()
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
import os
import shutil
import six
from taskflow import exceptions as exc
from taskflow.openstack.common import jsonutils
from taskflow.persistence.backends import base
from taskflow.utils import lock_utils
from taskflow.utils import misc
from taskflow.utils import persistence_utils as p_utils
LOG = logging.getLogger(__name__)
class DirBackend(base.Backend):
"""A backend that writes logbooks, flow details, and task details to a
provided directory. This backend does *not* provide transactional semantics
although it does guarantee that there will be no race conditions when
writing/reading by using file level locking.
"""
def __init__(self, conf):
super(DirBackend, self).__init__(conf)
self._path = os.path.abspath(conf['path'])
self._lock_path = os.path.join(self._path, 'locks')
self._file_cache = {}
@property
def lock_path(self):
return self._lock_path
@property
def base_path(self):
return self._path
def get_connection(self):
return Connection(self)
def close(self):
pass
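# Illustrative usage sketch ('/tmp/taskflow' is a hypothetical path): the
# backend only needs a 'path' entry in its configuration dict.
#
#   backend = DirBackend({'path': '/tmp/taskflow'})
#   conn = backend.get_connection()
#   conn.upgrade()   # creates the books/, flows/, tasks/ and locks/ layout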
class Connection(base.Connection):
def __init__(self, backend):
self._backend = backend
self._file_cache = self._backend._file_cache
self._flow_path = os.path.join(self._backend.base_path, 'flows')
self._task_path = os.path.join(self._backend.base_path, 'tasks')
self._book_path = os.path.join(self._backend.base_path, 'books')
def validate(self):
# Verify key paths exist.
paths = [
self._backend.base_path,
self._backend.lock_path,
self._flow_path,
self._task_path,
self._book_path,
]
for p in paths:
if not os.path.isdir(p):
raise RuntimeError("Missing required directory: %s" % (p))
def _read_from(self, filename):
# This is very similar to the oslo-incubator fileutils module, but
# tweaked to not depend on a global cache, as well as tweaked to not
# pull-in the oslo logging module (which is a huge pile of code).
mtime = os.path.getmtime(filename)
cache_info = self._file_cache.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
with open(filename, 'rb') as fp:
cache_info['data'] = fp.read().decode('utf-8')
cache_info['mtime'] = mtime
return cache_info['data']
def _write_to(self, filename, contents):
if isinstance(contents, six.text_type):
contents = contents.encode('utf-8')
with open(filename, 'wb') as fp:
fp.write(contents)
self._file_cache.pop(filename, None)
def _run_with_process_lock(self, lock_name, functor, *args, **kwargs):
lock_path = os.path.join(self.backend.lock_path, lock_name)
with lock_utils.InterProcessLock(lock_path):
try:
return functor(*args, **kwargs)
except exc.TaskFlowException:
raise
except Exception as e:
LOG.exception("Failed running locking file based session")
# NOTE(harlowja): trap all other errors as storage errors.
raise exc.StorageError("Storage backend internal error", e)
def _get_logbooks(self):
lb_uuids = []
try:
lb_uuids = [d for d in os.listdir(self._book_path)
if os.path.isdir(os.path.join(self._book_path, d))]
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
for lb_uuid in lb_uuids:
try:
yield self._get_logbook(lb_uuid)
except exc.NotFound:
pass
def get_logbooks(self):
try:
books = list(self._get_logbooks())
except EnvironmentError as e:
raise exc.StorageError("Unable to fetch logbooks", e)
else:
for b in books:
yield b
@property
def backend(self):
return self._backend
def close(self):
pass
def _save_task_details(self, task_detail, ignore_missing):
# See if we have an existing task detail to merge with.
e_td = None
try:
e_td = self._get_task_details(task_detail.uuid, lock=False)
except EnvironmentError:
if not ignore_missing:
raise exc.NotFound("No task details found with id: %s"
% task_detail.uuid)
if e_td is not None:
task_detail = p_utils.task_details_merge(e_td, task_detail)
td_path = os.path.join(self._task_path, task_detail.uuid)
td_data = p_utils.format_task_detail(task_detail)
self._write_to(td_path, jsonutils.dumps(td_data))
return task_detail
def update_task_details(self, task_detail):
return self._run_with_process_lock("task",
self._save_task_details,
task_detail,
ignore_missing=False)
def _get_task_details(self, uuid, lock=True):
def _get():
td_path = os.path.join(self._task_path, uuid)
td_data = misc.decode_json(self._read_from(td_path))
return p_utils.unformat_task_detail(uuid, td_data)
if lock:
return self._run_with_process_lock('task', _get)
else:
return _get()
def _get_flow_details(self, uuid, lock=True):
def _get():
fd_path = os.path.join(self._flow_path, uuid)
meta_path = os.path.join(fd_path, 'metadata')
meta = misc.decode_json(self._read_from(meta_path))
fd = p_utils.unformat_flow_detail(uuid, meta)
td_to_load = []
td_path = os.path.join(fd_path, 'tasks')
try:
td_to_load = [f for f in os.listdir(td_path)
if os.path.islink(os.path.join(td_path, f))]
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
for t_uuid in td_to_load:
fd.add(self._get_task_details(t_uuid))
return fd
if lock:
return self._run_with_process_lock('flow', _get)
else:
return _get()
def _save_tasks_and_link(self, task_details, local_task_path):
for task_detail in task_details:
self._save_task_details(task_detail, ignore_missing=True)
src_td_path = os.path.join(self._task_path, task_detail.uuid)
target_td_path = os.path.join(local_task_path, task_detail.uuid)
try:
os.symlink(src_td_path, target_td_path)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def _save_flow_details(self, flow_detail, ignore_missing):
# See if we have an existing flow detail to merge with.
e_fd = None
try:
e_fd = self._get_flow_details(flow_detail.uuid, lock=False)
except EnvironmentError:
if not ignore_missing:
raise exc.NotFound("No flow details found with id: %s"
% flow_detail.uuid)
if e_fd is not None:
e_fd = p_utils.flow_details_merge(e_fd, flow_detail)
for td in flow_detail:
if e_fd.find(td.uuid) is None:
e_fd.add(td)
flow_detail = e_fd
flow_path = os.path.join(self._flow_path, flow_detail.uuid)
misc.ensure_tree(flow_path)
self._write_to(
os.path.join(flow_path, 'metadata'),
jsonutils.dumps(p_utils.format_flow_detail(flow_detail)))
if len(flow_detail):
task_path = os.path.join(flow_path, 'tasks')
misc.ensure_tree(task_path)
self._run_with_process_lock('task',
self._save_tasks_and_link,
list(flow_detail), task_path)
return flow_detail
def update_flow_details(self, flow_detail):
return self._run_with_process_lock("flow",
self._save_flow_details,
flow_detail,
ignore_missing=False)
def _save_flows_and_link(self, flow_details, local_flow_path):
for flow_detail in flow_details:
self._save_flow_details(flow_detail, ignore_missing=True)
src_fd_path = os.path.join(self._flow_path, flow_detail.uuid)
target_fd_path = os.path.join(local_flow_path, flow_detail.uuid)
try:
os.symlink(src_fd_path, target_fd_path)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def _save_logbook(self, book):
# See if we have an existing logbook to merge with.
e_lb = None
try:
e_lb = self._get_logbook(book.uuid)
except exc.NotFound:
pass
if e_lb is not None:
e_lb = p_utils.logbook_merge(e_lb, book)
for fd in book:
if e_lb.find(fd.uuid) is None:
e_lb.add(fd)
book = e_lb
book_path = os.path.join(self._book_path, book.uuid)
misc.ensure_tree(book_path)
created_at = None
if e_lb is not None:
created_at = e_lb.created_at
self._write_to(os.path.join(book_path, 'metadata'), jsonutils.dumps(
p_utils.format_logbook(book, created_at=created_at)))
if len(book):
flow_path = os.path.join(book_path, 'flows')
misc.ensure_tree(flow_path)
self._run_with_process_lock('flow',
self._save_flows_and_link,
list(book), flow_path)
return book
def save_logbook(self, book):
return self._run_with_process_lock("book",
self._save_logbook, book)
def upgrade(self):
def _step_create():
for path in (self._book_path, self._flow_path, self._task_path):
try:
misc.ensure_tree(path)
except EnvironmentError as e:
raise exc.StorageError("Unable to create logbooks"
" required child path %s" % path, e)
for path in (self._backend.base_path, self._backend.lock_path):
try:
misc.ensure_tree(path)
except EnvironmentError as e:
raise exc.StorageError("Unable to create logbooks required"
" path %s" % path, e)
self._run_with_process_lock("init", _step_create)
def clear_all(self):
def _step_clear():
for d in (self._book_path, self._flow_path, self._task_path):
if os.path.isdir(d):
shutil.rmtree(d)
def _step_task():
self._run_with_process_lock("task", _step_clear)
def _step_flow():
self._run_with_process_lock("flow", _step_task)
def _step_book():
self._run_with_process_lock("book", _step_flow)
# Acquire all locks by going through this little hierarchy.
self._run_with_process_lock("init", _step_book)
def destroy_logbook(self, book_uuid):
def _destroy_tasks(task_details):
for task_detail in task_details:
task_path = os.path.join(self._task_path, task_detail.uuid)
try:
shutil.rmtree(task_path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise exc.StorageError("Unable to remove task"
" directory %s" % task_path, e)
def _destroy_flows(flow_details):
for flow_detail in flow_details:
flow_path = os.path.join(self._flow_path, flow_detail.uuid)
self._run_with_process_lock("task", _destroy_tasks,
list(flow_detail))
try:
shutil.rmtree(flow_path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise exc.StorageError("Unable to remove flow"
" directory %s" % flow_path, e)
def _destroy_book():
book = self._get_logbook(book_uuid)
book_path = os.path.join(self._book_path, book.uuid)
self._run_with_process_lock("flow", _destroy_flows, list(book))
try:
shutil.rmtree(book_path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise exc.StorageError("Unable to remove book"
" directory %s" % book_path, e)
# Acquire all locks by going through this little hierarchy.
self._run_with_process_lock("book", _destroy_book)
def _get_logbook(self, book_uuid):
book_path = os.path.join(self._book_path, book_uuid)
meta_path = os.path.join(book_path, 'metadata')
try:
meta = misc.decode_json(self._read_from(meta_path))
except EnvironmentError as e:
if e.errno == errno.ENOENT:
raise exc.NotFound("No logbook found with id: %s" % book_uuid)
else:
raise
lb = p_utils.unformat_logbook(book_uuid, meta)
fd_path = os.path.join(book_path, 'flows')
fd_uuids = []
try:
fd_uuids = [f for f in os.listdir(fd_path)
if os.path.islink(os.path.join(fd_path, f))]
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
for fd_uuid in fd_uuids:
lb.add(self._get_flow_details(fd_uuid))
return lb
def get_logbook(self, book_uuid):
return self._run_with_process_lock("book",
self._get_logbook, book_uuid)
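# Illustrative usage sketch (assumes `conn` from above and `lb`, a LogBook
# built via the persistence utilities):
#
#   conn.save_logbook(lb)                # merge-or-create under books/<uuid>
#   fetched = conn.get_logbook(lb.uuid)  # re-read, following flow symlinks
#   conn.destroy_logbook(lb.uuid)        # removes the book, flows and tasks
#
# Each call first takes the relevant inter-process file locks.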
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
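# Note (illustrative): __getitem__ makes ParameterGrid behave like an
# indexable sequence without materialising the full grid, e.g.
#
#   >>> grid = ParameterGrid({'a': [1, 2], 'b': [True, False]})  # doctest: +SKIP
#   >>> grid[3] == list(grid)[3]                                 # doctest: +SKIP
#   True
#
# ParameterSampler relies on this for memory-efficient sampling without
# replacement.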
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
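# Editor's sketch (not part of the original module): when every value is a
# list, ParameterSampler draws grid points without replacement, so n_iter
# distinct settings come back; a scipy distribution would instead be sampled
# with replacement through its rvs() method.
def _example_parameter_sampler():
    sampler = ParameterSampler({'a': [1, 2], 'b': [True, False]},
                               n_iter=4, random_state=0)
    settings = list(sampler)
    assert len(settings) == 4
    assert len(set(tuple(sorted(d.items())) for d in settings)) == 4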
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
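# Editor's sketch (not part of the original module), assuming this deprecated
# module's scikit-learn dependencies are importable: fit_grid_point evaluates
# one parameter setting on a single train/test split and also reports the
# test-set size, which _fit() later uses for iid weighting.
def _example_fit_grid_point():
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    X, y = rng.rand(20, 3), np.array([0, 1] * 10)
    train, test = np.arange(15), np.arange(15, 20)
    scorer = check_scoring(SVC())  # check_scoring is imported by this module
    score, params, n_test = fit_grid_point(X, y, SVC(), {'C': 1.0},
                                           train, test, scorer, verbose=0)
    assert params == {'C': 1.0} and n_test == 5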
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
        ----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
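# Editor's sketch (not part of the original module): with iid=True, _fit()
# above weights each fold's score by its test-set size instead of taking a
# plain mean across folds.
def _example_iid_weighting():
    fold_scores, fold_sizes = [0.8, 1.0], [10, 30]
    weighted = (sum(s * n for s, n in zip(fold_scores, fold_sizes))
                / float(sum(fold_sizes)))
    assert weighted == 0.95  # the unweighted mean would be 0.9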
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
    -----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
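# Editor's sketch (not part of the original module), assuming this deprecated
# module's scikit-learn dependencies are importable: after fit(), the best
# entry of grid_scores_ is consistent with best_params_ and best_score_.
def _example_grid_search_attributes():
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    clf = GridSearchCV(svm.SVC(), {'C': [1, 10]})
    clf.fit(iris.data, iris.target)
    best = max(clf.grid_scores_, key=lambda s: s.mean_validation_score)
    assert best.parameters == clf.best_params_
    assert best.mean_validation_score == clf.best_score_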
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
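# Editor's sketch (not part of the original module), assuming scipy is
# available: a distribution with an rvs() method is sampled with replacement,
# so n_iter is not limited by the size of any finite grid.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    search = RandomizedSearchCV(svm.SVC(), {'C': expon(scale=10)},
                                n_iter=5, random_state=0)
    search.fit(iris.data, iris.target)
    assert len(search.grid_scores_) == 5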
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Create Station-model plots."""
from enum import Enum
import numpy as np
from .wx_symbols import (current_weather, high_clouds, low_clouds, mid_clouds,
pressure_tendency, sky_cover, wx_symbol_font)
from ..package_tools import Exporter
exporter = Exporter(globals())
@exporter.export
class StationPlot:
"""Make a standard meteorological station plot.
Plots values, symbols, or text spaced around a central location. Can also plot wind
barbs as the center of the location.
"""
location_names = {'C': (0, 0), 'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1),
'N2': (0, 2), 'NNE': (1, 2), 'ENE': (2, 1), 'E2': (2, 0),
'ESE': (2, -1), 'SSE': (1, -2), 'S2': (0, -2), 'SSW': (-1, -2),
'WSW': (-2, -1), 'W2': (-2, 0), 'WNW': (-2, 1), 'NNW': (-1, 2)}
def __init__(self, ax, x, y, fontsize=10, spacing=None, transform=None, **kwargs):
"""Initialize the StationPlot with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`~matplotlib.axes.Axes` for plotting
x : array_like
The x location of the stations in the plot
y : array_like
The y location of the stations in the plot
fontsize : int
The fontsize to use for drawing text
spacing : int
The spacing, in points, that corresponds to a single increment between
station plot elements.
transform : matplotlib.transforms.Transform (or compatible)
The default transform to apply to the x and y positions when plotting.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
These will be passed to all the plotting methods, and thus need to be valid
for all plot types, such as `clip_on`.
"""
self.ax = ax
self.x = np.atleast_1d(x)
self.y = np.atleast_1d(y)
self.fontsize = fontsize
self.spacing = fontsize if spacing is None else spacing
self.transform = transform
self.items = {}
self.barbs = None
self.arrows = None
self.default_kwargs = kwargs
def plot_symbol(self, location, codes, symbol_mapper, **kwargs):
"""At the specified location in the station model plot a set of symbols.
This specifies that at the offset `location`, the data in `codes` should be
converted to unicode characters (for our :data:`wx_symbol_font`) using `symbol_mapper`,
and plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
codes : array_like
The numeric values that should be converted to unicode characters for plotting.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
from metpy.plots import StationPlot
from metpy.plots.wx_symbols import current_weather, current_weather_auto
from metpy.plots.wx_symbols import low_clouds, mid_clouds, high_clouds
from metpy.plots.wx_symbols import sky_cover, pressure_tendency
def plot_symbols(mapper, name, nwrap=10, figsize=(10, 1.4)):
# Determine how many symbols there are and layout in rows of nwrap
# if there are more than nwrap symbols
num_symbols = len(mapper)
codes = np.arange(len(mapper))
ncols = nwrap
if num_symbols <= nwrap:
nrows = 1
x = np.linspace(0, 1, len(mapper))
y = np.ones_like(x)
ax_height = 0.8
else:
nrows = int(ceil(num_symbols / ncols))
x = np.tile(np.linspace(0, 1, ncols), nrows)[:num_symbols]
y = np.repeat(np.arange(nrows, 0, -1), ncols)[:num_symbols]
figsize = (10, 1 * nrows + 0.4)
ax_height = 0.8 + 0.018 * nrows
fig = plt.figure(figsize=figsize, dpi=300)
ax = fig.add_axes([0, 0, 1, ax_height])
ax.set_title(name, size=20)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
# Plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_symbol('C', codes, mapper)
sp.plot_parameter((0, -1), codes, fontsize=18)
ax.set_ylim(-0.05, nrows + 0.5)
plt.show()
plot_symbols(current_weather, "Current Weather Symbols")
plot_symbols(current_weather_auto, "Current Weather Auto Reported Symbols")
plot_symbols(low_clouds, "Low Cloud Symbols")
plot_symbols(mid_clouds, "Mid Cloud Symbols")
plot_symbols(high_clouds, "High Cloud Symbols")
plot_symbols(sky_cover, "Sky Cover Symbols", nwrap=12)
plot_symbols(pressure_tendency, "Pressure Tendency Symbols")
See Also
--------
plot_barb, plot_parameter, plot_text
"""
# Make sure we use our font for symbols
kwargs['fontproperties'] = wx_symbol_font.copy()
return self.plot_parameter(location, codes, symbol_mapper, **kwargs)
def plot_parameter(self, location, parameter, formatter='.0f', **kwargs):
"""At the specified location in the station model plot a set of values.
This specifies that at the offset `location`, the data in `parameter` should be
plotted. The conversion of the data values to a string is controlled by `formatter`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
parameter : array_like
The numeric values that should be plotted
formatter : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
        plot_units : `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_symbol, plot_text
"""
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
parameter = self._scalar_plotting_units(parameter, plotting_units)
if hasattr(parameter, 'units'):
parameter = parameter.magnitude
text = self._to_string_list(parameter, formatter)
return self.plot_text(location, text, **kwargs)
def plot_text(self, location, text, **kwargs):
"""At the specified location in the station model plot a collection of text.
This specifies that at the offset `location`, the strings in `text` should be
plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
text : list (or array) of strings
The strings that should be plotted
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_parameter, plot_symbol
"""
location = self._handle_location(location)
kwargs = self._make_kwargs(kwargs)
text_collection = self.ax.scattertext(self.x, self.y, text, loc=location,
size=kwargs.pop('fontsize', self.fontsize),
**kwargs)
if location in self.items:
self.items[location].remove()
self.items[location] = text_collection
return text_collection
def plot_barb(self, u, v, **kwargs):
r"""At the center of the station model plot wind barbs.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the barbs.
v : array-like
The data to use for the v-component of the barbs.
        plot_units : `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
plot_arrow, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._vector_plotting_units(u, v, plotting_units)
# Empirically determined
pivot = 0.51 * np.sqrt(self.fontsize)
length = 1.95 * np.sqrt(self.fontsize)
defaults = {'sizes': {'spacing': .15, 'height': 0.5, 'emptybarb': 0.35},
'length': length, 'pivot': pivot}
defaults.update(kwargs)
# Remove old barbs
if self.barbs:
self.barbs.remove()
self.barbs = self.ax.barbs(self.x, self.y, u, v, **defaults)
def plot_arrow(self, u, v, **kwargs):
r"""At the center of the station model plot wind arrows.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.quiver` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the arrows.
v : array-like
The data to use for the v-component of the arrows.
        plot_units : `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
            Additional keyword arguments to pass to matplotlib's
            :meth:`~matplotlib.axes.Axes.quiver` function.
See Also
--------
plot_barb, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._vector_plotting_units(u, v, plotting_units)
defaults = {'pivot': 'tail', 'scale': 20, 'scale_units': 'inches', 'width': 0.002}
defaults.update(kwargs)
# Remove old arrows
if self.arrows:
self.arrows.remove()
self.arrows = self.ax.quiver(self.x, self.y, u, v, **defaults)
@staticmethod
def _vector_plotting_units(u, v, plotting_units):
"""Handle conversion to plotting units for barbs and arrows."""
if plotting_units:
if hasattr(u, 'units') and hasattr(v, 'units'):
u = u.to(plotting_units)
v = v.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'u and v wind components.')
# Strip units, CartoPy transform doesn't like
u = np.array(u)
v = np.array(v)
return u, v
@staticmethod
def _scalar_plotting_units(scalar_value, plotting_units):
"""Handle conversion to plotting units for non-vector quantities."""
if plotting_units:
if hasattr(scalar_value, 'units'):
scalar_value = scalar_value.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'scalar value being converted.')
return scalar_value
def _make_kwargs(self, kwargs):
"""Assemble kwargs as necessary.
Inserts our defaults as well as ensures transform is present when appropriate.
"""
# Use default kwargs and update with additional ones
all_kw = self.default_kwargs.copy()
all_kw.update(kwargs)
# Pass transform if necessary
if 'transform' not in all_kw and self.transform:
all_kw['transform'] = self.transform
return all_kw
@staticmethod
def _to_string_list(vals, fmt):
"""Convert a sequence of values to a list of strings."""
if not callable(fmt):
def formatter(s):
"""Turn a format string into a callable."""
return format(s, fmt)
else:
formatter = fmt
return [formatter(v) if np.isfinite(v) else '' for v in vals]
def _handle_location(self, location):
"""Process locations to get a consistent set of tuples for location."""
if isinstance(location, str):
location = self.location_names[location]
xoff, yoff = location
return xoff * self.spacing, yoff * self.spacing
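# Editor's sketch (not part of the original module): a minimal station plot on
# a plain matplotlib Axes. This assumes matplotlib is installed and that
# MetPy's matplotlib extensions (e.g. the Axes.scattertext method used by
# plot_text) have been registered, as importing the metpy.plots package
# normally does.
def _example_station_plot():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    sp = StationPlot(ax, [0.5], [0.5], fontsize=12)
    sp.plot_parameter('NW', [21.5], formatter='.1f')  # e.g. temperature
    sp.plot_barb([5.0], [5.0])
    sp.plot_symbol('C', [3], sky_cover)  # sky_cover imported at module top
    return fig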
@exporter.export
class StationPlotLayout(dict):
r"""make a layout to encapsulate plotting using :class:`StationPlot`.
This class keeps a collection of offsets, plot formats, etc. for a parameter based
on its name. This then allows a dictionary of data (or any object that allows looking
up of arrays based on a name) to be passed to :meth:`plot()` to plot the data all at once.
See Also
--------
StationPlot
"""
class PlotTypes(Enum):
r"""Different plotting types for the layout.
Controls how items are displayed (e.g. converting values to symbols).
"""
value = 1
symbol = 2
text = 3
barb = 4
def add_value(self, location, name, fmt='.0f', units=None, **kwargs):
r"""Add a numeric value to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. The conversion of the data values to
a string is controlled by `fmt`. The units required for plotting can also
be passed in using `units`, which will cause the data to be converted before
plotting.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
fmt : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_text
"""
self[location] = (self.PlotTypes.value, name, (fmt, units, kwargs))
def add_symbol(self, location, name, symbol_mapper, **kwargs):
r"""Add a symbol to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
        container using the key `name` and plotted. Data values will be converted to glyphs
appropriate for MetPy's symbol font using the callable `symbol_mapper`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_text, add_value
"""
self[location] = (self.PlotTypes.symbol, name, (symbol_mapper, kwargs))
def add_text(self, location, name, **kwargs):
r"""Add a text field to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted directly as text with no conversion
applied.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple(float, float)
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_value
"""
self[location] = (self.PlotTypes.text, name, kwargs)
def add_barb(self, u_name, v_name, units=None, **kwargs):
r"""Add a wind barb to the center of the station layout.
This specifies that u- and v-component data should be pulled from the data
container using the keys `u_name` and `v_name`, respectively, and plotted as
a wind barb at the center of the station plot. If `units` are given, both
components will be converted to these units.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or line width.
Parameters
----------
u_name : str
The name of the parameter for the u-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
v_name : str
The name of the parameter for the v-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
add_symbol, add_text, add_value
"""
# Not sure if putting the v_name as a plot-specific option is appropriate,
# but it seems simpler than making name code in plot handle tuples
self['barb'] = (self.PlotTypes.barb, (u_name, v_name), (units, kwargs))
def names(self):
"""Get the list of names used by the layout.
Returns
-------
list[str]
the list of names of variables used by the layout
"""
ret = []
for item in self.values():
if item[0] == self.PlotTypes.barb:
ret.extend(item[1])
else:
ret.append(item[1])
return ret
def plot(self, plotter, data_dict):
"""Plot a collection of data using this layout for a station plot.
This function iterates through the entire specified layout, pulling the fields named
in the layout from `data_dict` and plotting them using `plotter` as specified
in the layout. Fields present in the layout, but not in `data_dict`, are ignored.
Parameters
----------
plotter : StationPlot
:class:`StationPlot` to use to plot the data. This controls the axes,
spacing, station locations, etc.
data_dict : dict[str, array-like]
Data container that maps a name to an array of data. Data from this object
will be used to fill out the station plot.
"""
def coerce_data(dat, u):
try:
return dat.to(u).magnitude
except AttributeError:
return dat
for loc, info in self.items():
typ, name, args = info
if typ == self.PlotTypes.barb:
# Try getting the data
u_name, v_name = name
u_data = data_dict.get(u_name)
v_data = data_dict.get(v_name)
# Plot if we have the data
if not (v_data is None or u_data is None):
units, kwargs = args
plotter.plot_barb(coerce_data(u_data, units), coerce_data(v_data, units),
**kwargs)
else:
# Check that we have the data for this location
data = data_dict.get(name)
if data is not None:
# If we have it, hand it to the appropriate method
if typ == self.PlotTypes.value:
fmt, units, kwargs = args
plotter.plot_parameter(loc, coerce_data(data, units), fmt, **kwargs)
elif typ == self.PlotTypes.symbol:
mapper, kwargs = args
plotter.plot_symbol(loc, data, mapper, **kwargs)
elif typ == self.PlotTypes.text:
plotter.plot_text(loc, data, **args)
def __repr__(self):
"""Return string representation of layout."""
return ('{'
+ ', '.join('{0}: ({1[0].name}, {1[1]}, ...)'.format(loc, info)
for loc, info in sorted(self.items()))
+ '}')
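# Editor's sketch (not part of the original module): building a small custom
# layout and filling it from a plain dict; names missing from the dict (here
# 'sky_coverage') are simply skipped by plot(). The `plotter` argument is any
# StationPlot instance, e.g. from the sketch earlier in this file.
def _example_custom_layout(plotter):
    layout = StationPlotLayout()
    layout.add_value('NW', 'temperature', fmt='.1f')
    layout.add_symbol('C', 'sky_coverage', sky_cover)
    layout.add_barb('u_wind', 'v_wind')
    data = {'temperature': [21.5], 'u_wind': [5.0], 'v_wind': [5.0]}
    layout.plot(plotter, data)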
with exporter:
#: :desc: Simple station plot layout
simple_layout = StationPlotLayout()
simple_layout.add_barb('eastward_wind', 'northward_wind', 'knots')
simple_layout.add_value('NW', 'air_temperature', units='degC')
simple_layout.add_value('SW', 'dew_point_temperature', units='degC')
simple_layout.add_value('NE', 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
simple_layout.add_symbol('C', 'cloud_coverage', sky_cover)
simple_layout.add_symbol('W', 'current_wx1_symbol', current_weather)
#: Full NWS station plot `layout`__
#:
#: __ http://oceanservice.noaa.gov/education/yos/resource/JetStream/synoptic/wxmaps.htm
nws_layout = StationPlotLayout()
nws_layout.add_value((-1, 1), 'air_temperature', units='degF')
nws_layout.add_symbol((0, 2), 'high_cloud_type', high_clouds)
nws_layout.add_symbol((0, 1), 'medium_cloud_type', mid_clouds)
nws_layout.add_symbol((0, -1), 'low_cloud_type', low_clouds)
nws_layout.add_value((1, 1), 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
nws_layout.add_value((-2, 0), 'visibility_in_air', fmt='.0f', units='miles')
nws_layout.add_symbol((-1, 0), 'current_wx1_symbol', current_weather)
nws_layout.add_symbol((0, 0), 'cloud_coverage', sky_cover)
nws_layout.add_value((1, 0), 'tendency_of_air_pressure', units='mbar',
fmt=lambda v: ('-' if v < 0 else '') + format(10 * abs(v), '02.0f'))
nws_layout.add_symbol((2, 0), 'tendency_of_air_pressure_symbol', pressure_tendency)
nws_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
nws_layout.add_value((-1, -1), 'dew_point_temperature', units='degF')
# TODO: Fix once we have the past weather symbols converted
nws_layout.add_symbol((1, -1), 'current_wx2_symbol', current_weather)
"""
Copyright 2015 Logvinenko Maksim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import struct
import array
from photon.operations import OperationRequest, OperationResponse, EventData
from photon.typeddict import typed_dict
def serialize_op_request(op_request):
out = bytearray()
_serialize_op_request(out, op_request, False)
return out
def deserialize_event_data(buf):
result = EventData()
result.code = _deserialize_byte(buf)
result.params = _deserialize_parameters(buf)
return result
def deserialize_op_request(buf):
result = OperationRequest()
result.op_code = _deserialize_byte(buf)
result.params = _deserialize_parameters(buf)
return result
def deserialize_op_response(buf):
result = OperationResponse()
result.op_code = _deserialize_byte(buf)
result.return_code = _deserialize_short(buf)
result.debug_message = _deserialize(buf)
result.params = _deserialize_parameters(buf)
return result
# private methods
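# Editor's note (not part of the original module): the numeric type tags used
# throughout the (de)serializers below are the ASCII codes of Photon's
# wire-format type letters:
#    42 '*' null          68 'D' typed dict    98 'b' byte      100 'd' double
#   101 'e' event data   102 'f' float        104 'h' dict      105 'i' integer
#   107 'k' short        108 'l' long         111 'o' boolean   112 'p' op response
#   113 'q' op request   115 's' string       120 'x' bytearray 121 'y' array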
def _serialize(out, value, set_type):
if value is None:
out.append(42)
return
v_type = type(value)
if str == v_type:
_serialize_string(out, value, set_type)
elif bool == v_type:
_serialize_boolean(out, value, set_type)
elif int == v_type:
        # bit_length() is 0 for value == 0, so treat zero as a single byte
        byte_cnt = max(1, (value.bit_length() + 7) // 8)
        if byte_cnt == 1:
_serialize_byte(out, value, set_type)
elif byte_cnt == 2:
_serialize_short(out, value, set_type)
elif byte_cnt <= 4:
_serialize_integer(out, value, set_type)
elif byte_cnt <= 8:
_serialize_long(out, value, set_type)
elif float == v_type:
_serialize_double(out, value, set_type)
elif bytearray == v_type:
_serialize_bytearray(out, value, set_type)
elif array.array == v_type:
_serialize_array(out, value, set_type)
elif dict == v_type:
_serialize_dict(out, value, set_type)
elif typed_dict == v_type:
_serialize_typed_dict(out, value, set_type)
elif list == v_type:
_serialize_list(out, value, set_type)
elif OperationRequest == v_type:
_serialize_op_request(out, value, set_type)
elif OperationResponse == v_type:
_serialize_op_response(out, value, set_type)
elif EventData == v_type:
_serialize_event_data(out, value, set_type)
else:
raise Exception("Cannot serialize value of type {}".format(v_type))
def _serialize_parameters(out, params):
try:
if params is None:
params = {}
_serialize_short(out, len(params), False)
for key in params:
_serialize_byte(out, key, False)
_serialize(out, params[key], True)
except:
traceback.print_exc()
def _serialize_string(out, value, set_type):
if set_type:
out.extend(bytearray([115]))
str_bytes = bytearray(value, "utf-8")
_serialize_short(out, len(str_bytes), False)
out.extend(str_bytes)
def _serialize_boolean(out, value, set_type):
if set_type:
out.extend(bytearray([111]))
_serialize_byte(out, 1 if value is True else 0, False)
def _serialize_byte(out, value, set_type):
if set_type:
out.extend(bytearray([98]))
    # wrap into the signed 8-bit range [-128, 127] expected by struct.pack('>b', ...)
    value = ((value + (1 << 7)) % (1 << 8)) - (1 << 7)
out.extend(struct.pack('>b', value))
def _serialize_short(out, value, set_type):
if set_type:
out.extend(bytearray([107]))
    # wrap into the signed 16-bit range expected by struct.pack('>h', ...)
    value = ((value + (1 << 15)) % (1 << 16)) - (1 << 15)
out.extend(struct.pack('>h', value))
def _serialize_integer(out, value, set_type):
if set_type:
out.extend(bytearray([105]))
    # wrap into the signed 32-bit range expected by struct.pack('>i', ...)
    value = ((value + (1 << 31)) % (1 << 32)) - (1 << 31)
out.extend(struct.pack('>i', value))
def _serialize_long(out, value, set_type):
if set_type:
out.extend(bytearray([108]))
    # wrap into the signed 64-bit range expected by struct.pack('>q', ...)
    value = ((value + (1 << 63)) % (1 << 64)) - (1 << 63)
out.extend(struct.pack('>q', value))
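# Editor's sketch (not part of the original module): the wrap expression used
# by the integer serializers above maps any Python int onto the signed n-bit
# range that struct.pack expects.
def _example_signed_wrap():
    def wrap8(v):
        return ((v + (1 << 7)) % (1 << 8)) - (1 << 7)
    assert wrap8(130) == -126      # overflow wraps around
    assert wrap8(-128) == -128     # boundary value survives
    struct.pack('>b', wrap8(130))  # packs without error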
def _serialize_float(out, value, set_type):
if set_type:
out.extend(bytearray([102]))
out.extend(struct.pack('>f', value))
def _serialize_double(out, value, set_type):
if set_type:
out.extend(bytearray([100]))
out.extend(struct.pack('>d', value))
def _serialize_bytearray(out, value, set_type):
if set_type:
out.extend(bytearray([120]))
_serialize_integer(out, len(value), False)
out.extend(value)
def _serialize_array(out, value, set_type):
if set_type:
out.extend(bytearray([121]))
_serialize_short(out, len(value), False)
code = _get_code_for_array_typecode(value.typecode)
func = _get_serialize_func_for_code(code)
_serialize_byte(out, code, False)
for val in value:
func(out, val, False)
def _serialize_list(out, value, set_type):
"""
Now we can serialize only not empty list of strings.
For empty list please use None and for list of other type user array()
"""
if len(value) == 0:
raise ValueError("List must be not empty")
if set_type:
out.extend(bytearray([121]))
_serialize_short(out, len(value), False)
_serialize_byte(out, 115, False)
for val in value:
_serialize_string(out, val, False)
def _serialize_dict(out, value, set_type):
if set_type:
out.extend(bytearray([104]))
_serialize_short(out, len(value), False)
for key in value:
if key is None:
raise ValueError("None keys are now allowed for dict!")
_serialize(out, key, True)
_serialize(out, value[key], True)
def _serialize_typed_dict(out, value, set_type):
if set_type:
out.extend(bytearray([68]))
if object == value.key_type:
_serialize_byte(out, 0, False)
else:
_serialize_byte(out, _get_code_for_type(value.key_type), False)
if object == value.value_type:
_serialize_byte(out, 0, False)
else:
_serialize_byte(out, _get_code_for_type(value.value_type), False)
_serialize_short(out, len(value), False)
for key in value:
if key is None:
raise ValueError("None keys are now allowed for dict!")
_serialize(out, key, object == value.key_type)
if int is value.value_type:
_serialize_integer(out, value[key], False)
else:
_serialize(out, value[key], object == value.value_type)
def _serialize_event_data(out, value, set_type):
if set_type:
out.extend(bytearray([101]))
out.extend(bytearray([value.code]))
_serialize_parameters(out, value.params)
def _serialize_op_request(out, value, set_type):
if set_type:
out.extend(bytearray([113]))
out.extend(bytearray([value.op_code]))
_serialize_parameters(out, value.params)
def _serialize_op_response(out, value, set_type):
if set_type:
out.extend(bytearray([112]))
_serialize_byte(out, value.op_code, False)
_serialize_short(out, value.return_code, False)
if value.debug_message is None or len(value.debug_message) == 0:
_serialize_byte(out, 42, False)
else:
_serialize_byte(out, 115, False)
_serialize_string(out, value.debug_message, False)
_serialize_parameters(out, value.params)
def _deserialize(buf, v_type=None):
if v_type is None:
v_type = _deserialize_byte(buf)
if v_type == 0 or v_type == 42:
return None
elif v_type == 115:
return _deserialize_string(buf)
elif v_type == 111:
return _deserialize_boolean(buf)
elif v_type == 98:
return _deserialize_byte(buf)
elif v_type == 107:
return _deserialize_short(buf)
elif v_type == 105:
return _deserialize_integer(buf)
elif v_type == 108:
return _deserialize_long(buf)
elif v_type == 102:
return _deserialize_float(buf)
elif v_type == 100:
return _deserialize_double(buf)
elif v_type == 120:
return _deserialize_bytearray(buf)
elif v_type == 121:
return _deserialize_array(buf)
elif v_type == 104:
return _deserialize_dict(buf)
elif v_type == 68:
return _deserialize_typed_dict(buf)
elif v_type == 113:
return deserialize_op_request(buf)
elif v_type == 112:
return deserialize_op_response(buf)
elif v_type == 101:
return deserialize_event_data(buf)
else:
raise Exception("Cannot serialize value of type {}".format(v_type))
def _deserialize_parameters(buf):
params = {}
length = _deserialize_short(buf)
for i in range(length):
key = _deserialize_byte(buf)
value = _deserialize(buf)
params[key] = value
return params
def _deserialize_string(buf):
length = _deserialize_short(buf)
return _fetch_bytes(buf, length).decode("utf-8")
def _deserialize_boolean(buf):
return _deserialize_byte(buf) == 1
def _deserialize_byte(buf):
return struct.unpack('>b', _fetch_bytes(buf, 1))[0]
def _deserialize_short(buf):
return struct.unpack('>h', _fetch_bytes(buf, 2))[0]
def _deserialize_integer(buf):
return struct.unpack('>i', _fetch_bytes(buf, 4))[0]
def _deserialize_long(buf):
return struct.unpack('>q', _fetch_bytes(buf, 8))[0]
def _deserialize_float(buf):
return struct.unpack('>f', _fetch_bytes(buf, 4))[0]
def _deserialize_double(buf):
return struct.unpack('>d', _fetch_bytes(buf, 8))[0]
def _deserialize_bytearray(buf):
length = _deserialize_integer(buf)
return _fetch_bytes(buf, length)
def _deserialize_array(buf):
length = _deserialize_short(buf)
code = _deserialize_byte(buf)
if code == 115:
result = []
for i in range(length):
result.append(_deserialize_string(buf))
return result
else:
typecode = _get_array_typecode_for_code(code)
func = _get_deserialize_func_for_code(code)
result = array.array(typecode)
for i in range(length):
result.append(func(buf))
return result
def _deserialize_dict(buf):
length = _deserialize_short(buf)
result = {}
for i in range(length):
result[_deserialize(buf)] = _deserialize(buf)
return result
def _deserialize_typed_dict(buf):
key_type_code = _deserialize_byte(buf)
value_type_code = _deserialize_byte(buf)
result = typed_dict(_get_type_for_code(key_type_code), _get_type_for_code(value_type_code))
read_key_type = key_type_code == 0 or key_type_code == 42
read_value_type = value_type_code == 0 or value_type_code == 42
length = _deserialize_short(buf)
for i in range(length):
key = _deserialize(buf, None if read_key_type else key_type_code)
value = _deserialize(buf, None if read_value_type else value_type_code)
result[key] = value
return result
def _get_serialize_func_for_code(code):
    # Compare with == rather than 'is': identity checks on ints only work by
    # accident of CPython's small-integer caching.
    if code == 115:
        return _serialize_string
    if code == 111:
        return _serialize_boolean
    if code == 98:
        return _serialize_byte
    if code == 107:
        return _serialize_short
    if code == 105:
        return _serialize_integer
    if code == 108:
        return _serialize_long
    if code == 102:
        return _serialize_float
    if code == 100:
        return _serialize_double
    if code == 120:
        return _serialize_bytearray
    if code == 121:
        return _serialize_array
    if code == 104:
        return _serialize_dict
    if code == 101:
        return _serialize_event_data
    if code == 113:
        return _serialize_op_request
    if code == 112:
        return _serialize_op_response
    raise Exception("Unknown code: {}".format(code))
def _get_deserialize_func_for_code(code):
    if code == 115:
        return _deserialize_string
    if code == 111:
        return _deserialize_boolean
    if code == 98:
        return _deserialize_byte
    if code == 107:
        return _deserialize_short
    if code == 105:
        return _deserialize_integer
    if code == 108:
        return _deserialize_long
    if code == 102:
        return _deserialize_float
    if code == 100:
        return _deserialize_double
    if code == 120:
        return _deserialize_bytearray
    if code == 121:
        return _deserialize_array
    if code == 104:
        return _deserialize_dict
    if code == 101:
        return deserialize_event_data
    if code == 113:
        return deserialize_op_request
    if code == 112:
        return deserialize_op_response
    raise Exception("Unknown code: {}".format(code))
def _get_code_for_array_typecode(typecode):
    # String identity ('is') is implementation-dependent; use membership and
    # equality tests instead.
    if typecode in ('b', 'B'):
        return 98
    if typecode in ('h', 'H'):
        return 107
    if typecode in ('i', 'I'):
        return 105
    if typecode in ('l', 'L', 'q', 'Q'):
        return 108
    if typecode == 'f':
        return 102
    if typecode == 'd':
        return 100
    raise Exception("Unknown typecode: {}".format(typecode))
def _get_array_typecode_for_code(code):
    if code == 98:
        return 'b'
    if code == 107:
        return 'h'
    if code == 105:
        return 'i'
    if code == 108:
        return 'q'
    if code == 102:
        return 'f'
    if code == 100:
        return 'd'
    raise Exception("Unknown code: {}".format(code))
def _get_code_for_type(v_type, value=None):
if v_type is None:
return 42
elif str == v_type:
return 115
elif bool == v_type:
return 111
elif int == v_type:
if value is None:
return 105
byte_cnt = (value.bit_length() + 7) // 8
if byte_cnt == 1:
return 98
elif byte_cnt == 2:
return 107
elif byte_cnt <= 4:
return 105
elif byte_cnt <= 8:
return 108
elif float == v_type:
return 100
elif bytearray == v_type:
return 120
elif array.array == v_type:
return 121
elif dict == v_type:
return 104
elif typed_dict == v_type:
return 68
elif list == v_type:
return 121
elif OperationRequest == v_type:
return 113
elif OperationResponse == v_type:
return 112
elif EventData == v_type:
return 101
else:
raise Exception("Cannot serialize value of type {}".format(v_type))
def _get_type_for_code(code):
if code == 42 or code == 0:
return object
elif code == 115:
return str
elif code == 111:
return bool
elif code == 98 or code == 107 or code == 105 or code == 108:
return int
elif code == 102 or code == 100:
return float
elif code == 120:
return bytearray
elif code == 121:
return array.array
elif code == 104:
return dict
elif code == 68:
return typed_dict
elif code == 113:
return OperationRequest
elif code == 112:
return OperationResponse
elif code == 101:
return EventData
else:
raise Exception("Cannot serialize value of type {}".format(v_type))
def _fetch_bytes(buf, count):
    # Slice then delete in one step instead of popping byte by byte, which
    # was O(count**2); the consumed prefix is still removed from buf.
    res = buf[0:count]
    del buf[0:count]
    return res
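# Minimal round-trip sketch (illustrative; relies only on the helpers in
# this file plus the module-level _serialize dispatcher used above):
#   out = bytearray()
#   _serialize_dict(out, {"a": "x", "b": "y"}, True)
#   assert _deserialize(bytearray(out)) == {"a": "x", "b": "y"}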
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import json
import logging
import sys
import traceback
import urllib2
import webapp2
from google.appengine.api import memcache
MASTERS = [
{'name': 'ChromiumWin', 'url': 'http://build.chromium.org/p/chromium.win', 'groups': ['@ToT Chromium']},
{'name': 'ChromiumMac', 'url': 'http://build.chromium.org/p/chromium.mac', 'groups': ['@ToT Chromium']},
{'name': 'ChromiumLinux', 'url': 'http://build.chromium.org/p/chromium.linux', 'groups': ['@ToT Chromium']},
{'name': 'ChromiumChromiumOS', 'url': 'http://build.chromium.org/p/chromium.chromiumos', 'groups': ['@ToT ChromeOS']},
{'name': 'ChromiumGPU', 'url': 'http://build.chromium.org/p/chromium.gpu', 'groups': ['@ToT Chromium']},
{'name': 'ChromiumGPUFYI', 'url': 'http://build.chromium.org/p/chromium.gpu.fyi', 'groups': ['@ToT Chromium FYI']},
{'name': 'ChromiumWebkit', 'url': 'http://build.chromium.org/p/chromium.webkit', 'groups': ['@ToT Chromium', '@ToT Blink']},
{'name': 'ChromiumFYI', 'url': 'http://build.chromium.org/p/chromium.fyi', 'groups': ['@ToT Chromium FYI']},
{'name': 'V8', 'url': 'http://build.chromium.org/p/client.v8', 'groups': ['@ToT V8']},
]
class FetchBuildersException(Exception): pass
def master_json_url(master_url):
return master_url + '/json/builders'
def builder_json_url(master_url, builder):
return master_json_url(master_url) + '/' + urllib2.quote(builder)
def cached_build_json_url(master_url, builder, build_number):
return builder_json_url(master_url, builder) + '/builds/' + str(build_number)
def fetch_json(url):
logging.debug('Fetching %s' % url)
fetched_json = {}
try:
resp = urllib2.urlopen(url)
except:
exc_info = sys.exc_info()
logging.warning('Error while fetching %s: %s', url, exc_info[1])
return fetched_json
try:
fetched_json = json.load(resp)
except:
exc_info = sys.exc_info()
logging.warning('Unable to parse JSON response from %s: %s', url, exc_info[1])
return fetched_json
def get_latest_build(build_data):
cached_builds = []
if 'cachedBuilds' in build_data:
cached_builds = build_data['cachedBuilds']
current_builds = build_data['currentBuilds']
latest_cached_builds = set(cached_builds) - set(current_builds)
if len(latest_cached_builds) != 0:
latest_cached_builds = sorted(list(latest_cached_builds))
latest_build = latest_cached_builds[-1]
elif len(current_builds) != 0:
latest_build = current_builds[0]
else:
basedir = build_data['basedir'] if 'basedir' in build_data else 'current builder'
logging.info('No cached or current builds for %s', basedir)
return None
return latest_build
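# Example (illustrative): with cachedBuilds == [210, 211, 212] and
# currentBuilds == [212], the completed builds are {210, 211}, so 211 is
# returned; with no completed builds the first current build is used.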
def dump_json(data):
return json.dumps(data, separators=(',', ':'), sort_keys=True)
def fetch_buildbot_data(masters, force_update=False):
if force_update:
logging.info('Starting a forced buildbot update. Failure to fetch a master\'s data will not abort the fetch.')
start_time = datetime.datetime.now()
master_data = masters[:]
for master in master_data:
master_url = master['url']
        tests_object = master.setdefault('tests', {})
builders = fetch_json(master_json_url(master_url))
if not builders:
msg = 'Could not fetch builders from master "%s": %s.' % (master['name'], master_url)
logging.warning(msg)
if force_update:
continue
else:
logging.warning('Aborting fetch.')
raise FetchBuildersException(msg)
for builder in builders:
build_data = fetch_json(builder_json_url(master_url, builder))
latest_build = get_latest_build(build_data)
if not latest_build:
logging.info('Skipping builder %s because it lacked cached or current builds.', builder)
continue
build = fetch_json(cached_build_json_url(master_url, builder, latest_build))
            if not build:
                logging.info('Skipping build %s on builder %s due to empty data', latest_build, builder)
                continue
            for step in build['steps']:
step_name = step['name']
is_test_step = 'test' in step_name and 'archive' not in step_name and 'Run tests' not in step_name
if not is_test_step:
continue
if step_name == 'webkit_tests':
step_name = 'layout-tests'
tests_object.setdefault(step_name, {'builders': []})
tests_object[step_name]['builders'].append(builder)
        for test_data in tests_object.values():
            test_data['builders'].sort()
output_data = {'masters': master_data}
delta = datetime.datetime.now() - start_time
logging.info('Fetched buildbot data in %s seconds.', delta.seconds)
return dump_json(output_data)
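# Shape of the JSON returned above (sketch, derived from the code in this
# function):
#   {"masters": [{"name": ..., "url": ..., "groups": [...],
#                 "tests": {"layout-tests": {"builders": [...]}, ...}},
#                ...]}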
class UpdateBuilders(webapp2.RequestHandler):
"""Fetch and update the cached buildbot data."""
def get(self):
        force_update = bool(self.request.get('force'))
try:
buildbot_data = fetch_buildbot_data(MASTERS, force_update)
memcache.set('buildbot_data', buildbot_data)
self.response.set_status(200)
self.response.out.write("ok")
except FetchBuildersException, ex:
logging.error('Not updating builders because fetch failed: %s', str(ex))
self.response.set_status(500)
self.response.out.write(ex.message)
class GetBuilders(webapp2.RequestHandler):
"""Return a list of masters mapped to their respective builders, possibly using cached data."""
def get(self):
callback = self.request.get('callback')
buildbot_data = memcache.get('buildbot_data')
if not buildbot_data:
logging.warning('No buildbot data in memcache. If this message repeats, something is probably wrong with memcache.')
# Since we have no cached buildbot data, we would rather have missing masters than no data at all.
buildbot_data = fetch_buildbot_data(MASTERS, True)
try:
memcache.set('buildbot_data', buildbot_data)
except ValueError, err:
logging.error(str(err))
if callback:
buildbot_data = callback + '(' + buildbot_data + ');'
self.response.out.write(buildbot_data)
|
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCS related input readers."""
__all__ = [
"GCSInputReader",
"GCSRecordInputReader",
"PathFilter",
]
# pylint: disable=g-bad-name
# pylint: disable=protected-access
import logging
import pickle
import time
from mapreduce import errors
from mapreduce import records
from mapreduce.api import map_job
# pylint: disable=g-import-not-at-top
# TODO(user): Cleanup imports if/when cloudstorage becomes part of runtime.
try:
# Check if the full cloudstorage package exists. The stub part is in runtime.
import cloudstorage
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
pass # CloudStorage library not available
class PathFilter(object):
"""Path filter for GCSInputReader."""
def accept(self, slice_ctx, path):
"""Accepts a path.
    Only accepted paths will be opened for reading.
Args:
slice_ctx: the instance of SliceContext for current slice.
path: a GCS filename of form '/bucket/filename'
Returns:
True if this file should be read. False otherwise.
"""
raise NotImplementedError()
class GCSInputReader(map_job.InputReader):
"""Input reader from Google Cloud Storage using the cloudstorage library.
Required configuration in the mapper_spec.input_reader dictionary.
BUCKET_NAME_PARAM: name of the bucket to use. No "/" prefix or suffix.
OBJECT_NAMES_PARAM: a list of object names or prefixes. All objects must be
in the BUCKET_NAME_PARAM bucket. If the name ends with a * it will be
treated as prefix and all objects with matching names will be read.
Entries should not start with a slash unless that is part of the object's
name. An example list could be:
["my-1st-input-file", "directory/my-2nd-file", "some/other/dir/input-*"]
To retrieve all files "*" will match every object in the bucket. If a file
  is listed twice or is covered by multiple prefixes, it will be read twice;
  there is no de-duplication.
  Optional configuration in the mapper_spec.input_reader dictionary.
BUFFER_SIZE_PARAM: the size of the read buffer for each file handle.
PATH_FILTER_PARAM: an instance of PathFilter. PathFilter is a predicate
on which filenames to read.
DELIMITER_PARAM: str. The delimiter that signifies directory.
If you have too many files to shard on the granularity of individual
files, you can specify this to enable shallow splitting. In this mode,
the reader only goes one level deep during "*" expansion and stops when
the delimiter is encountered.
"""
# Counters.
COUNTER_FILE_READ = "file-read"
COUNTER_FILE_MISSING = "file-missing"
# Supported parameters
BUCKET_NAME_PARAM = "bucket_name"
OBJECT_NAMES_PARAM = "objects"
BUFFER_SIZE_PARAM = "buffer_size"
DELIMITER_PARAM = "delimiter"
PATH_FILTER_PARAM = "path_filter"
# Internal parameters
_ACCOUNT_ID_PARAM = "account_id"
# Other internal configuration constants
_JSON_PICKLE = "pickle"
_STRING_MAX_FILES_LISTED = 10 # Max files shown in the str representation
# Input reader can also take in start and end filenames and do
# listbucket. This saves space but has two cons.
# 1. Files to read are less well defined: files can be added or removed over
# the lifetime of the MR job.
# 2. A shard has to process files from a contiguous namespace.
  #    This may introduce straggler shards.
def __init__(self, filenames, index=0, buffer_size=None, _account_id=None,
delimiter=None, path_filter=None):
"""Initialize a GoogleCloudStorageInputReader instance.
Args:
filenames: A list of Google Cloud Storage filenames of the form
'/bucket/objectname'.
index: Index of the next filename to read.
buffer_size: The size of the read buffer, None to use default.
_account_id: Internal use only. See cloudstorage documentation.
delimiter: Delimiter used as path separator. See class doc.
path_filter: An instance of PathFilter.
"""
super(GCSInputReader, self).__init__()
self._filenames = filenames
self._index = index
self._buffer_size = buffer_size
self._account_id = _account_id
self._delimiter = delimiter
self._bucket = None
self._bucket_iter = None
self._path_filter = path_filter
self._slice_ctx = None
def _next_file(self):
"""Find next filename.
self._filenames may need to be expanded via listbucket.
Returns:
None if no more file is left. Filename otherwise.
"""
while True:
if self._bucket_iter:
try:
return self._bucket_iter.next().filename
except StopIteration:
self._bucket_iter = None
self._bucket = None
if self._index >= len(self._filenames):
return
filename = self._filenames[self._index]
self._index += 1
if self._delimiter is None or not filename.endswith(self._delimiter):
return filename
self._bucket = cloudstorage.listbucket(filename,
delimiter=self._delimiter)
self._bucket_iter = iter(self._bucket)
@classmethod
def validate(cls, job_config):
"""Validate mapper specification.
Args:
job_config: map_job.JobConfig.
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
reader_params = job_config.input_reader_params
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_params[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
# Delimiter.
if cls.DELIMITER_PARAM in reader_params:
delimiter = reader_params[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
# Buffer size.
if cls.BUFFER_SIZE_PARAM in reader_params:
buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]
if not isinstance(buffer_size, int):
raise errors.BadReaderParamsError(
"%s is not an int but a %s" %
(cls.BUFFER_SIZE_PARAM, type(buffer_size)))
# Path filter.
if cls.PATH_FILTER_PARAM in reader_params:
path_filter = reader_params[cls.PATH_FILTER_PARAM]
if not isinstance(path_filter, PathFilter):
raise errors.BadReaderParamsError(
"%s is not an instance of PathFilter but %s." %
(cls.PATH_FILTER_PARAM, type(path_filter)))
@classmethod
def split_input(cls, job_config):
"""Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
    be used. Input files are currently never split (although some formats
    could support splitting and may be split in a future implementation).
Args:
job_config: map_job.JobConfig
Returns:
A list of InputReaders. None when no input data can be found.
"""
reader_params = job_config.input_reader_params
bucket = reader_params[cls.BUCKET_NAME_PARAM]
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
delimiter = reader_params.get(cls.DELIMITER_PARAM)
account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)
path_filter = reader_params.get(cls.PATH_FILTER_PARAM)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, job_config.shard_count):
shard_filenames = all_filenames[shard::job_config.shard_count]
if shard_filenames:
readers.append(cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter, path_filter=path_filter))
return readers
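    # Example (illustrative): with files [a, b, c, d, e] and shard_count == 2,
    # the striding above gives shard 0 -> [a, c, e] and shard 1 -> [b, d];
    # with shard_count == 7 only five readers are returned.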
@classmethod
def from_json(cls, state):
obj = pickle.loads(state[cls._JSON_PICKLE])
if obj._bucket:
obj._bucket_iter = iter(obj._bucket)
return obj
def to_json(self):
before_iter = self._bucket_iter
before_slice_ctx = self._slice_ctx
self._bucket_iter = None
self._slice_ctx = None
try:
return {self._JSON_PICKLE: pickle.dumps(self)}
finally:
      self._bucket_iter = before_iter
self._slice_ctx = before_slice_ctx
def next(self):
"""Returns a handler to the next file.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
if (self._path_filter and
not self._path_filter.accept(self._slice_ctx, filename)):
continue
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
        self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                             int((time.time() - start_time) * 1000))
self._slice_ctx.incr(self.COUNTER_FILE_READ)
return handle
except cloudstorage.NotFoundError:
logging.warning("File %s may have been removed. Skipping file.",
filename)
self._slice_ctx.incr(self.COUNTER_FILE_MISSING)
def __str__(self):
# Only show a limited number of files individually for readability
num_files = len(self._filenames)
if num_files > self._STRING_MAX_FILES_LISTED:
names = "%s...%s + %d not shown" % (
",".join(self._filenames[0:self._STRING_MAX_FILES_LISTED-1]),
self._filenames[-1],
num_files - self._STRING_MAX_FILES_LISTED)
else:
names = ",".join(self._filenames)
    if self._index >= num_files:
status = "EOF"
else:
status = "Next %s (%d of %d)" % (
self._filenames[self._index],
self._index + 1, # +1 for human 1-indexing
num_files)
return "CloudStorage [%s, %s]" % (status, names)
@classmethod
def params_to_json(cls, params):
"""Inherit docs."""
params_cp = dict(params)
if cls.PATH_FILTER_PARAM in params_cp:
path_filter = params_cp[cls.PATH_FILTER_PARAM]
params_cp[cls.PATH_FILTER_PARAM] = pickle.dumps(path_filter)
return params_cp
@classmethod
def params_from_json(cls, json_params):
if cls.PATH_FILTER_PARAM in json_params:
path_filter = pickle.loads(json_params[cls.PATH_FILTER_PARAM])
json_params[cls.PATH_FILTER_PARAM] = path_filter
return json_params
class GCSRecordInputReader(GCSInputReader):
"""Read data from a Google Cloud Storage file using LevelDB format.
See the GCSInputReader for additional configuration options.
"""
def __getstate__(self):
result = self.__dict__.copy()
# record reader may not exist if reader has not been used
if "_record_reader" in result:
# RecordsReader has no buffering, it can safely be reconstructed after
# deserialization
result.pop("_record_reader")
return result
def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
      a LevelDB file.
Raises:
      StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
        self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                             int((time.time() - start_time) * 1000))
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
|
|
from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.compatibility import range
from .zeta_functions import zeta
from .error_functions import erf, erfc
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
r"""
The gamma function
.. math::
        \Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
The ``gamma`` function implements the function which passes through the
    values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
    a positive integer. More generally, `\Gamma(z)` is defined in the whole
    complex plane except at the nonpositive integers, where there are simple poles.
Examples
========
>>> from sympy import S, I, pi, oo, gamma
>>> from sympy.abc import x
Several special values are known:
>>> gamma(1)
1
>>> gamma(4)
6
>>> gamma(S(3)/2)
sqrt(pi)/2
The Gamma function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(gamma(x))
gamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(gamma(x), x)
gamma(x)*polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(gamma(x), x, 0, 3)
1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> gamma(pi).evalf(40)
2.288037795340032417959588909060233922890
>>> gamma(1+I).evalf(20)
0.49801566811835604271 - 0.15494982830181068512*I
See Also
========
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/GammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return self.func(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg.is_Integer:
if arg.is_positive:
return factorial(arg - 1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
if arg.is_integer and arg.is_nonpositive:
return S.ComplexInfinity
def _eval_expand_func(self, **hints):
arg = self.args[0]
if arg.is_Rational:
if abs(arg.p) > arg.q:
x = Dummy('x')
n = arg.p // arg.q
p = arg.p - n*arg.q
return self.func(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
intpart = floor(coeff)
tail = (coeff - intpart,) + tail
coeff = intpart
tail = arg._new_rawargs(*tail, reeval=False)
return self.func(tail)*RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
x = self.args[0]
if x.is_positive or x.is_noninteger:
return True
def _eval_is_positive(self):
x = self.args[0]
if x.is_positive:
return True
elif x.is_noninteger:
return floor(x).is_even
def _eval_rewrite_as_tractable(self, z):
return exp(loggamma(z))
def _eval_rewrite_as_factorial(self, z):
return factorial(z - 1)
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if not (x0.is_Integer and x0 <= 0):
return super(gamma, self)._eval_nseries(x, n, logx)
t = self.args[0] - x0
return (self.func(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
def _latex(self, printer, exp=None):
if len(self.args) != 1:
raise ValueError("Args length should be 1")
aa = printer._print(self.args[0])
if exp:
return r'\Gamma^{%s}{\left(%s \right)}' % (printer._print(exp), aa)
else:
return r'\Gamma{\left(%s \right)}' % aa
@staticmethod
def _latex_no_arg(printer):
return r'\Gamma'
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
r"""
The lower incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
This can be shown to be the same as
.. math::
\gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
Examples
========
>>> from sympy import lowergamma, S
>>> from sympy.abc import s, x
>>> lowergamma(s, x)
lowergamma(s, x)
>>> lowergamma(3, x)
-x**2*exp(-x) - 2*x*exp(-x) + 2 - 2*exp(-x)
>>> lowergamma(-S(1)/2, x)
-2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
- meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, x):
# For lack of a better place, we use this one to extract branching
# information. The following can be
        # found in the literature (cf. the references given above), albeit scattered:
# 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
# 2) For fixed positive integers s, lowergamma(s, x) is an entire
# function of x.
# 3) For fixed non-positive integers s,
# lowergamma(s, exp(I*2*pi*n)*x) =
# 2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
# (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
# 4) For fixed non-integral s,
# lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
# where lowergamma_unbranched(s, x) is an entire function (in fact
# of both s and x), i.e.
# lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
from sympy import unpolarify, I
nx, n = x.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(x)
if nx != x:
return lowergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
elif n != 0:
return exp(2*pi*I*n*a)*lowergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return S.One - exp(-x)
elif a is S.Half:
return sqrt(pi)*erf(sqrt(x))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, x) - x**b * exp(-x)
if not a.is_Integer:
return (cls(a + 1, x) + x**a * exp(-x))/a
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, 0, z)
return Expr._from_mpmath(res, prec)
def _eval_conjugate(self):
z = self.args[1]
        if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_uppergamma(self, s, x):
return gamma(s) - uppergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
if s.is_integer and s.is_nonpositive:
return self
return self.rewrite(uppergamma).rewrite(expint)
@staticmethod
def _latex_no_arg(printer):
return r'\gamma'
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
x**2*exp(-x) + 2*x*exp(-x) + 2*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*erfc(sqrt(x)) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] http://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return -exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
@classmethod
def eval(cls, a, z):
from sympy import unpolarify, I, expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
# TODO: Holds only for Re(a) > 0:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and (a > 0) == True:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and (a <= 0) == True:
if n != 0:
return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return exp(-z)
elif a is S.Half:
return sqrt(pi)*erfc(sqrt(z))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, z) + z**b * exp(-z)
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (cls(a + 1, z) - z**a * exp(-z))/a
def _eval_conjugate(self):
z = self.args[1]
        if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_lowergamma(self, s, x):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
return expint(1 - s, x)*x**s
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
r"""
The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
derivative of the logarithm of the gamma function:
.. math::
\psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
Examples
========
Several special values are known:
>>> from sympy import S, polygamma
>>> polygamma(0, 1)
-EulerGamma
>>> polygamma(0, 1/S(2))
-2*log(2) - EulerGamma
>>> polygamma(0, 1/S(3))
-3*log(3)/2 - sqrt(3)*pi/6 - EulerGamma
>>> polygamma(0, 1/S(4))
-3*log(2) - pi/2 - EulerGamma
>>> polygamma(0, 2)
-EulerGamma + 1
>>> polygamma(0, 23)
-EulerGamma + 19093197/5173168
>>> from sympy import oo, I
>>> polygamma(0, oo)
oo
>>> polygamma(0, -oo)
oo
>>> polygamma(0, I*oo)
oo
>>> polygamma(0, -I*oo)
oo
Differentiation with respect to x is supported:
>>> from sympy import Symbol, diff
>>> x = Symbol("x")
>>> diff(polygamma(0, x), x)
polygamma(1, x)
>>> diff(polygamma(0, x), x, 2)
polygamma(2, x)
>>> diff(polygamma(0, x), x, 3)
polygamma(3, x)
>>> diff(polygamma(1, x), x)
polygamma(2, x)
>>> diff(polygamma(1, x), x, 2)
polygamma(3, x)
>>> diff(polygamma(2, x), x)
polygamma(3, x)
>>> diff(polygamma(2, x), x, 2)
polygamma(4, x)
>>> n = Symbol("n")
>>> diff(polygamma(n, x), x)
polygamma(n + 1, x)
>>> diff(polygamma(n, x), x, 2)
polygamma(n + 2, x)
We can rewrite polygamma functions in terms of harmonic numbers:
>>> from sympy import harmonic
>>> polygamma(0, x).rewrite(harmonic)
harmonic(x - 1) - EulerGamma
>>> polygamma(2, x).rewrite(harmonic)
2*harmonic(x - 1, 3) - 2*zeta(3)
>>> ni = Symbol("n", integer=True)
>>> polygamma(ni, x).rewrite(harmonic)
(-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Polygamma_function
.. [2] http://mathworld.wolfram.com/PolygammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
.. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[:2]
return polygamma(n + 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_positive(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_even
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = Order(1/z, x)
else:
m = ceiling((n + 1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = ceiling((n + 1)//2)
for k in range(1, m):
fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = Order(1/z**(2*m), x)
if n == 0:
o = Order(1/z, x)
elif n == 1:
o = Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
@classmethod
def eval(cls, n, z):
n, z = list(map(sympify, (n, z)))
from sympy import unpolarify
if n.is_integer:
if n.is_nonnegative:
nz = unpolarify(z)
if z != nz:
return polygamma(n, nz)
if n == -1:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + harmonic(z - 1, 1)
elif n.is_odd:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
if n == 0:
if z is S.NaN:
return S.NaN
elif z.is_Rational:
# TODO actually *any* n/m can be done, but that is messy
lookup = {S(1)/2: -2*log(2) - S.EulerGamma,
S(1)/3: -S.Pi/2/sqrt(3) - 3*log(3)/2 - S.EulerGamma,
S(1)/4: -S.Pi/2 - 3*log(2) - S.EulerGamma,
S(3)/4: -3*log(2) - S.EulerGamma + S.Pi/2,
S(2)/3: -3*log(3)/2 + S.Pi/2/sqrt(3) - S.EulerGamma}
if z > 0:
n = floor(z)
z0 = z - n
if z0 in lookup:
return lookup[z0] + Add(*[1/(z0 + k) for k in range(n)])
elif z < 0:
n = floor(1 - z)
z0 = z + n
if z0 in lookup:
return lookup[z0] - Add(*[1/(z0 - 1 - k) for k in range(n)])
elif z in (S.Infinity, S.NegativeInfinity):
return S.Infinity
else:
t = z.extract_multiplicatively(S.ImaginaryUnit)
if t in (S.Infinity, S.NegativeInfinity):
return S.Infinity
# TODO n == 1 also can do some rational z
def _eval_expand_func(self, **hints):
n, z = self.args
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
if coeff > 0:
tail = Add(*[Pow(
z - i, e) for i in range(1, int(coeff) + 1)])
else:
tail = -Add(*[Pow(
z + i, e) for i in range(0, int(-coeff))])
return polygamma(n, z - coeff) + (-1)**n*factorial(n)*tail
elif z.is_Mul:
coeff, z = z.as_two_terms()
if coeff.is_Integer and coeff.is_positive:
tail = [ polygamma(n, z + Rational(
i, coeff)) for i in range(0, int(coeff)) ]
if n == 0:
return Add(*tail)/coeff + log(coeff)
else:
return Add(*tail)/coeff**(n + 1)
z *= coeff
return polygamma(n, z)
def _eval_rewrite_as_zeta(self, n, z):
if n >= S.One:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
else:
return self
def _eval_rewrite_as_harmonic(self, n, z):
if n.is_integer:
if n == S.Zero:
return harmonic(z - 1) - S.EulerGamma
else:
return S.NegativeOne**(n+1) * factorial(n) * (zeta(n+1) - harmonic(z-1, n+1))
def _eval_as_leading_term(self, x):
from sympy import Order
n, z = [a.as_leading_term(x) for a in self.args]
o = Order(z, x)
if n == 0 and o.contains(1/x):
return o.getn() * log(x)
else:
return self.func(n, z)
class loggamma(Function):
r"""
The ``loggamma`` function implements the logarithm of the
    gamma function, i.e., `\log\Gamma(x)`.
Examples
========
Several special values are known. For numerical integral
arguments we have:
>>> from sympy import loggamma
>>> loggamma(-2)
oo
>>> loggamma(0)
oo
>>> loggamma(1)
0
>>> loggamma(2)
0
>>> loggamma(3)
log(2)
and for symbolic values:
>>> from sympy import Symbol
>>> n = Symbol("n", integer=True, positive=True)
>>> loggamma(n)
log(gamma(n))
>>> loggamma(-n)
oo
for half-integral values:
>>> from sympy import S, pi
>>> loggamma(S(5)/2)
log(3*sqrt(pi)/4)
>>> loggamma(n/2)
log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))
and general rational arguments:
>>> from sympy import expand_func
>>> L = loggamma(S(16)/3)
>>> expand_func(L).doit()
-5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
>>> L = loggamma(S(19)/4)
>>> expand_func(L).doit()
-4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
>>> L = loggamma(S(23)/7)
>>> expand_func(L).doit()
-3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)
The loggamma function has the following limits towards infinity:
>>> from sympy import oo
>>> loggamma(oo)
oo
>>> loggamma(-oo)
zoo
The loggamma function obeys the mirror symmetry
if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:
>>> from sympy.abc import x
>>> from sympy import conjugate
>>> conjugate(loggamma(x))
loggamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(loggamma(x), x)
polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(loggamma(x), x, 0, 4)
-log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)
    We can numerically evaluate the ``loggamma`` function to arbitrary precision
on the whole complex plane:
>>> from sympy import I
>>> loggamma(5).evalf(30)
3.17805383034794561964694160130
>>> loggamma(I).evalf(20)
-0.65092319930185633889 - 1.8724366472624298171*I
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/LogGammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
"""
@classmethod
def eval(cls, z):
z = sympify(z)
if z.is_integer:
if z.is_nonpositive:
return S.Infinity
elif z.is_positive:
return log(gamma(z))
elif z.is_rational:
p, q = z.as_numer_denom()
# Half-integral values:
if p.is_positive and q == 2:
return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))
if z is S.Infinity:
return S.Infinity
elif abs(z) is S.Infinity:
return S.ComplexInfinity
if z is S.NaN:
return S.NaN
def _eval_expand_func(self, **hints):
from sympy import Sum
z = self.args[0]
if z.is_Rational:
p, q = z.as_numer_denom()
# General rational arguments (u + p/q)
# Split z as n + p/q with p < q
n = p // q
p = p - n*q
if p.is_positive and q.is_positive and p < q:
k = Dummy("k")
if n.is_positive:
return loggamma(p / q) - n*log(q) + Sum(log((k - 1)*q + p), (k, 1, n))
elif n.is_negative:
return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - Sum(log(k*q - p), (k, 1, -n))
elif n.is_zero:
return loggamma(p / q)
return self
def _eval_nseries(self, x, n, logx=None):
x0 = self.args[0].limit(x, 0)
if x0 is S.Zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super(loggamma, self)._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[0] != oo:
return super(loggamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
m = min(n, ceiling((n + S(1))/2))
r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
o = None
if m == 0:
o = Order(1, x)
else:
o = Order(1/z**(2*m - 1), x)
# It is very inefficient to first add the order and then do the nseries
return (r + Add(*l))._eval_nseries(x, n, logx) + o
def _eval_rewrite_as_intractable(self, z):
return log(gamma(z))
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
z = self.args[0]
        if z not in (S.Zero, S.NegativeInfinity):
return self.func(z.conjugate())
def fdiff(self, argindex=1):
if argindex == 1:
return polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _sage_(self):
import sage.all as sage
return sage.log_gamma(self.args[0]._sage_())
def digamma(x):
r"""
    The digamma function is the first derivative of the loggamma function, i.e.,
.. math::
\psi(x) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
= \frac{\Gamma'(z)}{\Gamma(z) }
In this case, ``digamma(z) = polygamma(0, z)``.
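    Examples
    ========
    >>> from sympy import digamma
    >>> from sympy.abc import x
    >>> digamma(x)
    polygamma(0, x)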
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Digamma_function
.. [2] http://mathworld.wolfram.com/DigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(0, x)
def trigamma(x):
r"""
    The trigamma function is the second derivative of the loggamma function, i.e.,
.. math::
\psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).
In this case, ``trigamma(z) = polygamma(1, z)``.
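    Examples
    ========
    >>> from sympy import trigamma
    >>> from sympy.abc import x
    >>> trigamma(x)
    polygamma(1, x)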
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Trigamma_function
.. [2] http://mathworld.wolfram.com/TrigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(1, x)
|
|
# Alpha O. Sall
# 03/24/2014
from flask import Flask, request, Response, render_template
app = Flask(__name__)#,template_folder='/var/www/inkscope/inkscopeAdm/')
import requests
from array import *
import sys
from urllib2 import HTTPError
import json
from bson.json_util import dumps
import time
import mongoJuiceCore
import poolsCtrl
try:
import poolsCtrlSalt
except:
pass
import osdsCtrl
from S3Ctrl import S3Ctrl, S3Error
from Log import Log
#Added for S3 objects management
from S3ObjectCtrl import *
# Load configuration from file
configfile = "/opt/inkscope/etc/inkscope.conf"
datasource = open(configfile, "r")
conf = json.load(datasource)
datasource.close()
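# Illustrative inkscope.conf content (a sketch; only the "minion" key is read
# directly in this file, the rest of the dict is passed to the controllers):
#   {"minion": "ceph-admin", ...}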
# json.load() returns a dict, so .get() cannot raise here; the value is
# None when the "minion" key is absent.
minion = conf.get("minion")
#
# mongoDB query facility
#
@app.route('/<db>/<collection>', methods=['GET', 'POST'])
def find(db, collection):
return mongoJuiceCore.find(conf, db, collection)
@app.route('/<db>', methods=['POST'])
def full(db):
return mongoJuiceCore.full(conf, db)
#
# Pools management
#
## Ceph Rest API
@app.route('/pools/', methods=['GET','POST'])
@app.route('/pools/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage(id=None):
return poolsCtrl.pool_manage(id)
@app.route('/pools/<int:id>/snapshot', methods=['POST'])
def makesnapshot(id):
return poolsCtrl.makesnapshot(id)
@app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(id, namesnapshot):
return poolsCtrl.removesnapshot(id, namesnapshot)
## Rest API with Salt
try:
@app.route('/poolsalt/', methods=['GET','POST'])
@app.route('/poolsalt/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage_salt(id=None):
return poolsCtrlSalt.pool_manage_salt(id, minion)
except:
pass
try:
@app.route('/poolsalt/<int:id>/snapshot', methods=['POST'])
def makesnapshot_salt(id):
        # poolsCtrl.makesnapshot() takes a single argument (see above); the
        # two-argument salt variant presumably lives in poolsCtrlSalt.
        return poolsCtrlSalt.makesnapshot(id, minion)
except:
pass
try:
@app.route('/poolsalt/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot_salt(id, namesnapshot):
        # Same as above: the minion-aware variant presumably lives in
        # poolsCtrlSalt.
        return poolsCtrlSalt.removesnapshot(id, namesnapshot, minion)
except:
pass
#
# Osds management
#
@app.route('/osds', methods=['PUT'])
def osds_manage(id=None):
return osdsCtrl.osds_manage(id)
#
# Object storage management
#
# This method returns the S3 object whose id is "objId".
# An exception is thrown if the object does not exist or if there is an issue.
@app.route('/S3/object', methods=['GET'])
def getObjectStructure():
    Log.debug("Calling getObjectStructure() method")
    try:
        return Response(S3ObjectCtrl(conf).getObjectStructure(), mimetype='application/json')
    except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
# User management
@app.route('/S3/user', methods=['GET'])
def listUser():
try:
return Response(S3Ctrl(conf).listUsers(),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user', methods=['POST'])
def createUser():
try:
return Response(S3Ctrl(conf).createUser(),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['GET'])
def getUser(uid):
try:
return Response(S3Ctrl(conf).getUser(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['PUT'])
def modifyUser(uid):
try:
return Response(S3Ctrl(conf).modifyUser(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['DELETE'])
def removeUser(uid):
try:
return Response(S3Ctrl(conf).removeUser(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/key/<string:key>', methods=['DELETE'])
def removeUserKey(uid,key):
try:
return Response(S3Ctrl(conf).removeUserKey(uid,key),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser', methods=['PUT'])
def createSubuser(uid):
try:
return Response(S3Ctrl(conf).createSubuser(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>', methods=['DELETE'])
def deleteSubuser(uid, subuser):
try:
return Response(S3Ctrl(conf).deleteSubuser(uid, subuser),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['PUT'])
def createSubuserKey(uid, subuser):
Log.debug("createSubuserKey")
try:
return Response(S3Ctrl(conf).createSubuserKey(uid, subuser),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key/<string:key>', methods=['DELETE'])
def deleteSubuserKey(uid, subuser, key):
Log.debug("deleteSubuserKey")
try:
return Response(S3Ctrl(conf).deleteSubuserKey(uid, subuser,key),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/caps', methods=['PUT', 'POST'])
def saveCapability(uid):
Log.debug("saveCapability")
try:
return Response(S3Ctrl(conf).saveCapability(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/caps', methods=['DELETE'])
def deleteCapability(uid):
Log.debug("deleteCapability")
try:
return Response(S3Ctrl(conf).deleteCapability(uid),mimetype='application/json')
except S3Error , e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
# bucket management
@app.route('/S3/user/<string:uid>/buckets', methods=['GET'])
def getUserBuckets(uid):
try:
return Response(S3Ctrl(conf).getUserBuckets(uid),mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket', methods=['PUT'])
def createBucket():
try:
return Response(S3Ctrl(conf).createBucket(), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket', methods=['GET'])
def getBuckets():
try:
return Response(S3Ctrl(conf).getBucketInfo(None), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>', methods=['GET'])
def getBucketInfo(bucket=None):
try:
return Response(S3Ctrl(conf).getBucketInfo(bucket), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>', methods=['DELETE'])
def deleteBucket(bucket):
try:
return Response(S3Ctrl(conf).deleteBucket(bucket), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>/link', methods=['DELETE','PUT'])
def linkBucket(bucket):
try:
uid = request.form['uid']
if request.method =='PUT':
return Response(S3Ctrl(conf).linkBucket(uid, bucket), mimetype='application/json')
else:
return Response(S3Ctrl(conf).unlinkBucket(uid, bucket), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucketName>/list', methods=['GET'])
def listBucket(bucketName):
try:
return Response(S3Ctrl(conf).listBucket(bucketName), mimetype='application/json')
    except S3Error as e:
Log.err(e.__str__())
return Response(e.reason, status=e.code)
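# The routes above all repeat the same try/except boilerplate. Below is a
# hedged sketch of how that pattern could be factored into a decorator;
# `s3_errors` is illustrative and not part of this module's API.
import functools

def s3_errors(view):
    @functools.wraps(view)
    def wrapper(*args, **kwargs):
        try:
            return view(*args, **kwargs)
        except S3Error as e:
            Log.err(e.__str__())
            return Response(e.reason, status=e.code)
    return wrapper
# Usage sketch: stack it under @app.route so each view body shrinks to the
# single controller call, e.g.:
# @app.route('/S3/bucket/<string:bucket>', methods=['GET'])
# @s3_errors
# def getBucketInfo(bucket=None):
#     return Response(S3Ctrl(conf).getBucketInfo(bucket), mimetype='application/json')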
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""rospy internal core implementation library"""
import atexit
import logging
import os
import signal
import sys
import threading
import time
import traceback
import types
try:
import urllib.parse as urlparse #Python 3.x
except ImportError:
import urlparse
try:
import xmlrpc.client as xmlrpcclient #Python 3.x
except ImportError:
import xmlrpclib as xmlrpcclient #Python 2.x
import rospkg
import rosgraph.roslogging
import rospy.exceptions
import rospy.rostime
from rospy.names import *
from rospy.impl.validators import ParameterInvalid
from rosgraph_msgs.msg import Log
_logger = logging.getLogger("rospy.core")
# number of seconds to wait to join on threads. network issues can
# cause joins to not terminate gracefully, and it's better to
# tear down dirty than to hang
_TIMEOUT_SHUTDOWN_JOIN = 5.
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
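# Hedged usage sketch for the decorator above: `_example_old_api` is a
# hypothetical function, not part of rospy. Calling it emits a
# DeprecationWarning while still delegating to the original implementation.
@deprecated
def _example_old_api():
    return 42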
#########################################################
# ROSRPC
ROSRPC = "rosrpc://"
def parse_rosrpc_uri(uri):
"""
utility function for parsing ROS-RPC URIs
@param uri: ROSRPC URI
@type uri: str
@return: address, port
@rtype: (str, int)
@raise ParameterInvalid: if uri is not a valid ROSRPC URI
"""
if uri.startswith(ROSRPC):
dest_addr = uri[len(ROSRPC):]
else:
raise ParameterInvalid("Invalid protocol for ROS service URL: %s"%uri)
try:
if '/' in dest_addr:
dest_addr = dest_addr[:dest_addr.find('/')]
dest_addr, dest_port = dest_addr.split(':')
dest_port = int(dest_port)
except:
raise ParameterInvalid("ROS service URL is invalid: %s"%uri)
return dest_addr, dest_port
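# Hedged example (the URI below is hypothetical): parse_rosrpc_uri splits a
# rosrpc:// URI into its (address, port) pair, dropping any trailing path,
# and raises ParameterInvalid for other schemes.
def _example_parse_rosrpc_uri():
    addr, port = parse_rosrpc_uri("rosrpc://robot-host:49152/add_two_ints")
    assert (addr, port) == ("robot-host", 49152)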
#########################################################
# rospy logger
_rospy_logger = logging.getLogger("rospy.internal")
# we keep a separate, non-rosout log file to contain stack traces and
# other sorts of information that scare users but are essential for
# debugging
def rospydebug(msg, *args):
"""Internal rospy client library debug logging"""
_rospy_logger.debug(msg, *args)
def rospyinfo(msg, *args):
"""Internal rospy client library debug logging"""
_rospy_logger.info(msg, *args)
def rospyerr(msg, *args):
"""Internal rospy client library error logging"""
_rospy_logger.error(msg, *args)
def rospywarn(msg, *args):
"""Internal rospy client library warn logging"""
_rospy_logger.warn(msg, *args)
logdebug = logging.getLogger('rosout').debug
logwarn = logging.getLogger('rosout').warning
loginfo = logging.getLogger('rosout').info
logout = loginfo # alias deprecated name
logerr = logging.getLogger('rosout').error
logerror = logerr # alias logerr
logfatal = logging.getLogger('rosout').critical
#########################################################
# CONSTANTS
MASTER_NAME = "master" #master is a reserved node name for the central master
import warnings
import functools
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return newFunc
@deprecated
def get_ros_root(required=False, env=None):
"""
Get the value of ROS_ROOT.
@param env: override environment dictionary
@type env: dict
@param required: if True, fails with ROSException
@return: Value of ROS_ROOT environment
@rtype: str
    @raise ROSException: if required is True and ROS_ROOT is not set
"""
if env is None:
env = os.environ
ros_root = rospkg.get_ros_root(env)
if required and not ros_root:
raise rospy.exceptions.ROSException('%s is not set'%rospkg.environment.ROS_ROOT)
return ros_root
#########################################################
# API
_uri = None
def get_node_uri():
"""
Get this Node's URI.
@return: this Node's XMLRPC URI
@rtype: str
"""
return _uri
def set_node_uri(uri):
"""set the URI of the local node.
This is an internal API method, it does not actually affect the XMLRPC URI of the Node."""
global _uri
_uri = uri
#########################################################
# Logging
_log_filename = None
def configure_logging(node_name, level=logging.INFO):
"""
Setup filesystem logging for this node
@param node_name: Node's name
    @type node_name: str
@param level: (optional) Python logging level (INFO, DEBUG, etc...). (Default: logging.INFO)
@type level: int
"""
global _log_filename
# #988 __log command-line remapping argument
mappings = get_mappings()
if '__log' in get_mappings():
logfilename_remap = mappings['__log']
filename = os.path.abspath(logfilename_remap)
else:
# fix filesystem-unsafe chars
filename = node_name.replace('/', '_') + '.log'
if filename[0] == '_':
filename = filename[1:]
if not filename:
raise rospy.exceptions.ROSException('invalid configure_logging parameter: %s'%node_name)
_log_filename = rosgraph.roslogging.configure_logging('rospy', level, filename=filename)
class NullHandler(logging.Handler):
def emit(self, record):
pass
# keep logging happy until we have the node name to configure with
logging.getLogger('rospy').addHandler(NullHandler())
#########################################################
# Init/Shutdown/Exit API and Handlers
_client_ready = False
def is_initialized():
"""
Get the initialization state of the local node. If True, node has
been configured.
@return: True if local node initialized
@rtype: bool
"""
return _client_ready
def set_initialized(initialized):
"""
set the initialization state of the local node
@param initialized: True if node initialized
@type initialized: bool
"""
global _client_ready
_client_ready = initialized
_shutdown_lock = threading.RLock()
# _shutdown_flag flags that rospy is in shutdown mode, in_shutdown
# flags that the shutdown routine has started. These are separate
# because 'pre-shutdown' hooks require rospy to be in a non-shutdown
# mode. These hooks are executed during the shutdown routine.
_shutdown_flag = False
_in_shutdown = False
# various hooks to call on shutdown. shutdown hooks are called in the
# shutdown state, preshutdown are called just before entering shutdown
# state, and client shutdown is called before both of these.
_shutdown_hooks = []
_preshutdown_hooks = []
_client_shutdown_hooks = []
# threads that must be joined on shutdown
_shutdown_threads = []
_signalChain = {}
def is_shutdown():
"""
@return: True if shutdown flag has been set
@rtype: bool
"""
return _shutdown_flag
def is_shutdown_requested():
"""
is_shutdown_requested is a state that occurs just before
    is_shutdown. It is initiated when a shutdown request is
received and continues until client shutdown handlers have been
called. After client shutdown handlers have been serviced, the
is_shutdown state becomes true.
@return: True if shutdown has been requested (but possibly not yet initiated)
@rtype: bool
"""
return _in_shutdown
def _add_shutdown_hook(h, hooks, pass_reason_argument=True):
"""
shared implementation of add_shutdown_hook and add_preshutdown_hook
"""
if not callable(h):
raise TypeError("shutdown hook [%s] must be a function or callable object: %s"%(h, type(h)))
if _shutdown_flag:
        _logger.warning("add_shutdown_hook called after shutdown")
if pass_reason_argument:
h("already shutdown")
else:
h()
return
with _shutdown_lock:
if hooks is None:
# race condition check, don't log as we are deep into shutdown
return
hooks.append(h)
def _add_shutdown_thread(t):
"""
Register thread that must be joined() on shutdown
"""
if _shutdown_flag:
#TODO
return
with _shutdown_lock:
if _shutdown_threads is None:
# race condition check, don't log as we are deep into shutdown
return
# in order to prevent memory leaks, reap dead threads. The
# last thread may not get reaped until shutdown, but this is
# relatively minor
for other in _shutdown_threads[:]:
            if not other.is_alive():
_shutdown_threads.remove(other)
_shutdown_threads.append(t)
def add_client_shutdown_hook(h):
"""
Add client method to invoke when system shuts down. Unlike
    L{add_shutdown_hook} and L{add_preshutdown_hook}, these methods
will be called before any rospy internal shutdown code.
@param h: function with zero args
@type h: fn()
"""
_add_shutdown_hook(h, _client_shutdown_hooks, pass_reason_argument=False)
def add_preshutdown_hook(h):
"""
Add method to invoke when system shuts down. Unlike
L{add_shutdown_hook}, these methods will be called before any
other shutdown hooks.
@param h: function that takes in a single string argument (shutdown reason)
@type h: fn(str)
"""
_add_shutdown_hook(h, _preshutdown_hooks)
def add_shutdown_hook(h):
"""
Add method to invoke when system shuts down.
Shutdown hooks are called in the order that they are
registered. This is an internal API method that is used to
cleanup. See the client X{on_shutdown()} method if you wish to
register client hooks.
@param h: function that takes in a single string argument (shutdown reason)
@type h: fn(str)
"""
_add_shutdown_hook(h, _shutdown_hooks)
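# Hedged sketch of the three hook flavours above; the callbacks are
# illustrative only. Client hooks take no arguments and run first,
# preshutdown hooks run next with the shutdown reason, and shutdown hooks
# run last, also with the reason.
def _example_register_hooks():
    events = []
    add_client_shutdown_hook(lambda: events.append("client"))
    add_preshutdown_hook(lambda reason: events.append("pre: " + reason))
    add_shutdown_hook(lambda reason: events.append("post: " + reason))
    return events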
def signal_shutdown(reason):
"""
Initiates shutdown process by signaling objects waiting on _shutdown_lock.
Shutdown and pre-shutdown hooks are invoked.
@param reason: human-readable shutdown reason, if applicable
@type reason: str
"""
global _shutdown_flag, _in_shutdown, _shutdown_lock, _shutdown_hooks
_logger.info("signal_shutdown [%s]"%reason)
if _shutdown_flag or _in_shutdown:
return
with _shutdown_lock:
if _shutdown_flag or _in_shutdown:
return
_in_shutdown = True
# make copy just in case client re-invokes shutdown
for h in _client_shutdown_hooks:
try:
# client shutdown hooks do not accept a reason arg
h()
except:
traceback.print_exc()
del _client_shutdown_hooks[:]
for h in _preshutdown_hooks:
try:
h(reason)
except:
traceback.print_exc()
del _preshutdown_hooks[:]
# now that pre-shutdown hooks have been called, raise shutdown
# flag. This allows preshutdown hooks to still publish and use
# service calls properly
_shutdown_flag = True
for h in _shutdown_hooks:
try:
h(reason)
except Exception as e:
sys.stderr.write("signal_shutdown hook error[%s]\n"%e)
del _shutdown_hooks[:]
threads = _shutdown_threads[:]
for t in threads:
            if t.is_alive():
t.join(_TIMEOUT_SHUTDOWN_JOIN)
del _shutdown_threads[:]
try:
rospy.rostime.wallsleep(0.1) #hack for now until we get rid of all the extra threads
except KeyboardInterrupt: pass
def _ros_signal(sig, stackframe):
signal_shutdown("signal-"+str(sig))
prev_handler = _signalChain.get(sig, None)
if prev_handler is not None and not type(prev_handler) == int:
try:
prev_handler(sig, stackframe)
except KeyboardInterrupt:
pass #filter out generic keyboard interrupt handler
def _ros_atexit():
signal_shutdown('atexit')
atexit.register(_ros_atexit)
# #687
def register_signals():
"""
register system signal handlers for SIGTERM and SIGINT
"""
_signalChain[signal.SIGTERM] = signal.signal(signal.SIGTERM, _ros_signal)
_signalChain[signal.SIGINT] = signal.signal(signal.SIGINT, _ros_signal)
# Validators ######################################
def is_topic(param_name):
"""
Validator that checks that parameter is a valid ROS topic name
"""
def validator(param_value, caller_id):
v = valid_name_validator_resolved(param_name, param_value, caller_id)
if param_value == '/':
raise ParameterInvalid("ERROR: parameter [%s] cannot be the global namespace"%param_name)
return v
return validator
def xmlrpcapi(uri):
"""
@return: instance for calling remote server or None if not a valid URI
@rtype: xmlrpclib.ServerProxy
"""
if uri is None:
return None
uriValidate = urlparse.urlparse(uri)
if not uriValidate[0] or not uriValidate[1]:
return None
return xmlrpcclient.ServerProxy(uri)
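# Hedged example: xmlrpcapi returns None for missing or malformed URIs and a
# ServerProxy for well-formed ones. The address below is hypothetical; no
# network traffic happens until a method is invoked on the proxy.
def _example_xmlrpcapi():
    assert xmlrpcapi(None) is None
    assert xmlrpcapi("not-a-uri") is None
    assert xmlrpcapi("http://localhost:11311/") is not None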
|
|
# Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example:
python -m classification.metrics.compute_fishing_metrics \
--inference-table machine_learning_dev_ttl_120d.fishing_detection_vid_features_v20190509_ \
--label-path classification/data/det_info_v20190507.csv \
--dest-path ./test_fishing_inference_0509.html \
--fishing-ranges classification/data/det_ranges_v20190507.csv
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import csv
import subprocess
import numpy as np
import pandas as pd
import pandas_gbq
import dateutil.parser
import logging
import argparse
from collections import namedtuple, defaultdict
import sys
import yattag
from classification.metadata import VESSEL_CLASS_DETAILED_NAMES, VESSEL_CATEGORIES, schema, atomic
import gzip
import dateutil.parser
import datetime
import pytz
from .ydump import css, ydump_table
import six
coarse_categories = [
'cargo_or_tanker', 'passenger', 'seismic_vessel', 'tug', 'other_fishing',
'drifting_longlines', 'seiners', 'fixed_gear', 'squid_jigger', 'trawlers',
'other_not_fishing']
coarse_mapping = defaultdict(set)
for k0, extra in [('fishing', 'other_fishing'),
('non_fishing', 'other_not_fishing')]:
for k1, v1 in schema['unknown'][k0].items():
key = k1 if (k1 in coarse_categories) else extra
if v1 is None:
coarse_mapping[key] |= {k1}
else:
coarse_mapping[key] |= set(atomic(v1))
coarse_mapping = [(k, coarse_mapping[k]) for k in coarse_categories]
fishing_mapping = [
['fishing', set(atomic(schema['unknown']['fishing']))],
['non_fishing', set(atomic(schema['unknown']['non_fishing']))],
]
fishing_category_map = {}
atomic_fishing = fishing_mapping[0][1]
for coarse, fine in coarse_mapping:
    for atomic_label in fine:
        if atomic_label in atomic_fishing:
            fishing_category_map[atomic_label] = coarse
# Faster than using dateutil
def _parse(x):
if isinstance(x, datetime.datetime):
return x
# 2014-08-28T13:56:16+00:00
# TODO: fix generation to generate consistent datetimes
if x[-6:] == '+00:00':
x = x[:-6]
if x.endswith('.999999'):
x = x[:-7]
if x.endswith('Z'):
x = x[:-1]
try:
dt = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
except:
logging.fatal('Could not parse "%s"', x)
raise
return dt.replace(tzinfo=pytz.UTC)
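# Hedged examples of the timestamp variants _parse accepts; all of them
# normalize to the same UTC-aware datetime, and datetime inputs pass through.
def _example_parse():
    expected = datetime.datetime(2014, 8, 28, 13, 56, 16, tzinfo=pytz.UTC)
    assert _parse('2014-08-28T13:56:16+00:00') == expected
    assert _parse('2014-08-28T13:56:16Z') == expected
    assert _parse(expected) is expected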
LocalisationResults = namedtuple('LocalisationResults',
['true_fishing_by_id',
'pred_fishing_by_id', 'label_map'])
FishingRange = namedtuple('FishingRange',
['is_fishing', 'start_time', 'end_time'])
def ydump_fishing_localisation(doc, results):
doc, tag, text, line = doc.ttl()
y_true = np.concatenate(list(results.true_fishing_by_id.values()))
y_pred = np.concatenate(list(results.pred_fishing_by_id.values()))
header = ['Gear Type (id:true/total)', 'Precision', 'Recall', 'Accuracy', 'F1-Score']
rows = []
logging.info('Overall localisation accuracy %s',
accuracy_score(y_true, y_pred))
logging.info('Overall localisation precision %s',
precision_score(y_true, y_pred))
logging.info('Overall localisation recall %s',
recall_score(y_true, y_pred))
    for cls in sorted(set(fishing_category_map.values())) + ['other']:
true_chunks = []
pred_chunks = []
id_list = []
for id_ in results.label_map:
if id_ not in results.true_fishing_by_id:
continue
if fishing_category_map.get(results.label_map[id_], 'other') != cls:
continue
id_list.append(id_)
true_chunks.append(results.true_fishing_by_id[id_])
pred_chunks.append(results.pred_fishing_by_id[id_])
if len(true_chunks):
logging.info('ID for {}: {}'.format(cls, id_list))
y_true = np.concatenate(true_chunks)
y_pred = np.concatenate(pred_chunks)
rows.append(['{} ({}:{}/{})'.format(cls, len(true_chunks), sum(y_true), len(y_true)),
precision_score(y_true, y_pred),
recall_score(y_true, y_pred),
accuracy_score(y_true, y_pred),
f1_score(y_true, y_pred), ])
rows.append(['', '', '', '', ''])
y_true = np.concatenate(list(results.true_fishing_by_id.values()))
y_pred = np.concatenate(list(results.pred_fishing_by_id.values()))
rows.append(['Overall',
precision_score(y_true, y_pred),
recall_score(y_true, y_pred),
accuracy_score(y_true, y_pred),
f1_score(y_true, y_pred), ])
with tag('div', klass='unbreakable'):
ydump_table(
doc, header,
[[('{:.2f}'.format(x) if isinstance(x, float) else x) for x in row]
for row in rows])
def precision_score(y_true, y_pred):
y_true = np.asarray(y_true, dtype=bool)
y_pred = np.asarray(y_pred, dtype=bool)
true_pos = y_true & y_pred
all_pos = y_pred
return true_pos.sum() / all_pos.sum()
def recall_score(y_true, y_pred):
y_true = np.asarray(y_true, dtype=bool)
y_pred = np.asarray(y_pred, dtype=bool)
true_pos = y_true & y_pred
all_true = y_true
return true_pos.sum() / all_true.sum()
def f1_score(y_true, y_pred):
prec = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
return 2 / (1 / prec + 1 / recall)
def accuracy_score(y_true, y_pred, weights=None):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
if weights is None:
weights = np.ones_like(y_pred).astype(float)
weights = np.asarray(weights)
correct = (y_true == y_pred)
return (weights * correct).sum() / weights.sum()
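# Hedged worked example for the four metrics above, using tiny hand-made
# arrays rather than real inference data. With one true positive out of two
# predicted positives, precision is 0.5; one true positive out of two actual
# positives gives recall 0.5; two of four labels match, so accuracy is 0.5;
# and F1 = 2 / (1/0.5 + 1/0.5) = 0.5.
def _example_metrics():
    y_true = [True, True, False, False]
    y_pred = [True, False, False, True]
    assert precision_score(y_true, y_pred) == 0.5
    assert recall_score(y_true, y_pred) == 0.5
    assert accuracy_score(y_true, y_pred) == 0.5
    assert f1_score(y_true, y_pred) == 0.5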
def load_inferred_fishing(table, id_list, project_id, threshold=True):
"""Load inferred data and generate comparison data
"""
query_template = """
SELECT vessel_id as id, start_time, end_time, nnet_score FROM
TABLE_DATE_RANGE([{table}],
TIMESTAMP('{year}-01-01'), TIMESTAMP('{year}-12-31'))
WHERE vessel_id in ({ids})
"""
ids = ','.join('"{}"'.format(x) for x in id_list)
ranges = defaultdict(list)
for year in range(2012, 2019):
query = query_template.format(table=table, year=year, ids=ids)
try:
df = pd.read_gbq(query, project_id=project_id, dialect='legacy')
except pandas_gbq.gbq.GenericGBQException as err:
if 'matches no table' in err.args[0]:
print('skipping', year)
continue
else:
print(query)
raise
for x in df.itertuples():
score = x.nnet_score
if threshold:
score = score > 0.5
start = x.start_time.replace(tzinfo=pytz.utc)
end = x.end_time.replace(tzinfo=pytz.utc)
ranges[x.id].append(FishingRange(score, start, end))
return ranges
def load_true_fishing_ranges_by_id(fishing_range_path,
split_map,
split,
threshold=True):
ranges_by_id = defaultdict(list)
parse = dateutil.parser.parse
with open(fishing_range_path) as f:
for row in csv.DictReader(f):
id_ = row['id'].strip()
            if split_map.get(id_) != str(split):
continue
val = float(row['is_fishing'])
if threshold:
val = val > 0.5
rng = (val, parse(row['start_time']).replace(tzinfo=pytz.UTC),
parse(row['end_time']).replace(tzinfo=pytz.UTC))
ranges_by_id[id_].append(rng)
return ranges_by_id
def datetime_to_minute(dt):
timestamp = (dt - datetime.datetime(
1970, 1, 1, tzinfo=pytz.utc)).total_seconds()
return int(timestamp // 60)
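# Hedged example: one hour past the Unix epoch lands on minute 60.
def _example_datetime_to_minute():
    dt = datetime.datetime(1970, 1, 1, 1, 0, tzinfo=pytz.utc)
    assert datetime_to_minute(dt) == 60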
def compare_fishing_localisation(inferred_ranges, fishing_range_path,
label_map, split_map, split):
logging.debug('loading fishing ranges')
true_ranges_by_id = load_true_fishing_ranges_by_id(fishing_range_path,
split_map, split)
print("TRUE", sorted(true_ranges_by_id.keys())[:10])
print("INF", sorted(inferred_ranges.keys())[:10])
print(repr(sorted(true_ranges_by_id.keys())[0]))
print(repr(sorted(inferred_ranges.keys())[0]))
true_by_id = {}
pred_by_id = {}
for id_ in sorted(true_ranges_by_id.keys()):
id_ = six.ensure_text(id_)
logging.debug('processing %s', id_)
if id_ not in inferred_ranges:
continue
true_ranges = true_ranges_by_id[id_]
if not true_ranges:
continue
# Determine minutes from start to finish of this id, create an array to
# hold results and fill with -1 (unknown)
logging.debug('processing %s true ranges', len(true_ranges))
logging.debug('finding overall range')
_, start, end = true_ranges[0]
for (_, s, e) in true_ranges[1:]:
start = min(start, s)
end = max(end, e)
start_min = datetime_to_minute(start)
end_min = datetime_to_minute(end)
minutes = np.empty([end_min - start_min + 1, 2], dtype=int)
minutes.fill(-1)
# Fill in minutes[:, 0] with known true / false values
logging.debug('filling 0s')
for (is_fishing, s, e) in true_ranges:
s_min = datetime_to_minute(s)
e_min = datetime_to_minute(e)
for m in range(s_min - start_min, e_min - start_min + 1):
minutes[m, 0] = is_fishing
# fill in minutes[:, 1] with inferred true / false values
logging.debug('filling 1s')
for (is_fishing, s, e) in inferred_ranges[str(id_)]:
s_min = datetime_to_minute(s)
e_min = datetime_to_minute(e)
for m in range(s_min - start_min, e_min - start_min + 1):
if 0 <= m < len(minutes):
minutes[m, 1] = is_fishing
mask = ((minutes[:, 0] != -1) & (minutes[:, 1] != -1))
if mask.sum():
accuracy = (
(minutes[:, 0] == minutes[:, 1]) * mask).sum() / mask.sum()
logging.debug('Accuracy for ID %s: %s', id_, accuracy)
true_by_id[id_] = minutes[mask, 0]
pred_by_id[id_] = minutes[mask, 1]
return LocalisationResults(true_by_id, pred_by_id, label_map)
def compute_results(args):
logging.info('Loading label maps')
maps = defaultdict(dict)
with open(args.label_path) as f:
for row in csv.DictReader(f):
id_ = row['id'].strip()
            if row['split'] != str(args.split):
continue
for field in ['label', 'split']:
if row[field]:
if field == 'label':
                        if row[field].strip() not in VESSEL_CLASS_DETAILED_NAMES:
                            continue
maps[field][id_] = row[field]
# Sanity check the attribute mappings
for field in ['length', 'tonnage', 'engine_power', 'crew_size']:
for id_, value in maps[field].items():
assert float(value) > 0, (id_, value)
logging.info('Loading inference data')
ids = set([x for x in maps['split'] if maps['split'][x] == str(args.split)])
fishing_ranges = load_inferred_fishing(args.inference_table, ids, args.project_id)
logging.info('Comparing localisation')
results = {}
results['localisation'] = compare_fishing_localisation(
fishing_ranges, args.fishing_ranges, maps['label'],
maps['split'], args.split)
return results
def dump_html(args, results):
doc = yattag.Doc()
with doc.tag('style', type='text/css'):
doc.asis(css)
logging.info('Dumping Localisation')
doc.line('h2', 'Fishing Localisation')
ydump_fishing_localisation(doc, results['localisation'])
doc.stag('hr')
with open(args.dest_path, 'w') as f:
logging.info('Writing output')
f.write(yattag.indent(doc.getvalue(), indent_text=True))
"""
python -m classification.metrics.compute_fishing_metrics \
--inference-table machine_learning_dev_ttl_120d.test_dataflow_2016_ \
--label-path classification/data/fishing_classes.csv \
--dest-path test_fishing.html \
--fishing-ranges classification/data/combined_fishing_ranges.csv \
"""
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(
description='Test fishing inference results and output metrics.\n')
parser.add_argument(
'--inference-table', help='table of inference results', required=True)
parser.add_argument(
'--project-id', help='Google Cloud project id',
default='world-fishing-827')
parser.add_argument(
'--label-path', help='path to test data', required=True)
parser.add_argument('--fishing-ranges', help='path to fishing range data', required=True)
parser.add_argument(
'--dest-path', help='path to write results to', required=True)
parser.add_argument('--split', type=int, default=0)
args = parser.parse_args()
results = compute_results(args)
dump_html(args, results)
|
|
#!/usr/bin/python
# Copyright 2012 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Start/stop oVirt Engine
#
# chkconfig: - 65 34
# description: oVirt Engine
# pidfile: /var/run/ovirt-engine.pid
import configobj
import errno
import glob
import grp
import optparse
import os
import pwd
import resource
import signal
import stat
import string
import sys
import syslog
import time
import traceback
from Cheetah.Template import Template
# The name of the engine:
engineName = "engine-service"
# The engine system configuration variables:
engineSysconfig = None
# The name of the user and group that should run the service:
engineUser = None
engineGroup = None
engineUid = 0
engineGid = 0
# JBoss directories:
jbossHomeDir = None
# JBoss files:
jbossModulesJar = None
# Engine directories:
engineEtcDir = None
engineLogDir = None
engineTmpDir = None
engineUsrDir = None
engineVarDir = None
engineLockDir = None
engineContentDir = None
engineDeploymentsDir = None
engineEarDir = None
# Engine files:
enginePidFile = None
engineLoggingFile = None
engineConfigTemplateFile = None
engineConfigFile = None
engineLogFile = None
engineBootLogFile = None
engineConsoleLogFile = None
engineServerLogFile = None
def loadSysconfig():
# Load the configuration file:
engineSysconfigFile = "/etc/sysconfig/ovirt-engine"
if not os.path.exists(engineSysconfigFile):
raise Exception("The engine sysconfig file \"%s\" doesn't exist." % engineSysconfigFile)
global engineSysconfig
engineSysconfig = configobj.ConfigObj(engineSysconfigFile)
# Get the id of the engine user:
global engineUser
global engineUid
engineUser = getSysconfig("ENGINE_USER", "ovirt")
try:
engineUid = pwd.getpwnam(engineUser).pw_uid
except:
raise Exception("The engine user \"%s\" doesn't exist." % engineUser)
# Get id of the engine group:
global engineGroup
global engineGid
engineGroup = getSysconfig("ENGINE_GROUP", "ovirt")
try:
engineGid = grp.getgrnam(engineGroup).gr_gid
except:
raise Exception("The engine group \"%s\" doesn't exist." % engineGroup)
# JBoss directories:
global jbossHomeDir
jbossHomeDir = getSysconfig("JBOSS_HOME", "/usr/share/jboss-as")
# JBoss files:
global jbossModulesJar
jbossModulesJar = os.path.join(jbossHomeDir, "jboss-modules.jar")
# Engine directories:
global engineEtcDir
global engineLogDir
global engineTmpDir
global engineUsrDir
global engineVarDir
global engineLockDir
global engineServiceDir
global engineContentDir
global engineDeploymentsDir
global engineEarDir
engineEtcDir = getSysconfig("ENGINE_ETC", "/etc/ovirt-engine")
engineLogDir = getSysconfig("ENGINE_LOG", "/var/log/ovirt-engine")
engineTmpDir = getSysconfig("ENGINE_TMP", "/var/cache/ovirt-engine")
engineUsrDir = getSysconfig("ENGINE_USR", "/usr/share/ovirt-engine")
engineVarDir = getSysconfig("ENGINE_VAR", "/var/lib/ovirt-engine")
engineLockDir = getSysconfig("ENGINE_LOCK", "/var/lock/ovirt-engine")
engineServiceDir = os.path.join(engineUsrDir, "service")
engineContentDir = os.path.join(engineVarDir, "content")
engineDeploymentsDir = os.path.join(engineVarDir, "deployments")
engineEarDir = os.path.join(engineUsrDir, "engine.ear")
# Engine files:
global enginePidFile
global engineLoggingFile
global engineConfigTemplateFile
global engineConfigFile
global engineLogFile
global engineBootLogFile
global engineConsoleLogFile
global engineServerLogFile
enginePidFile = getSysconfig("ENGINE_PID", "/var/run/ovirt-engine.pid")
engineLoggingFile = os.path.join(engineServiceDir, "engine-service-logging.properties")
engineConfigTemplateFile = os.path.join(engineServiceDir, "engine-service.xml.in")
engineConfigFile = os.path.join(engineTmpDir, "engine-service.xml")
engineLogFile = os.path.join(engineLogDir, "engine.log")
engineBootLogFile = os.path.join(engineLogDir, "boot.log")
engineConsoleLogFile = os.path.join(engineLogDir, "console.log")
engineServerLogFile = os.path.join(engineLogDir, "server.log")
def getSysconfig(variable, default=None):
# Then try with the environment (it overrides the config file):
value = os.getenv(variable)
if value:
return value
# Then try with the config file:
value = engineSysconfig.get(variable)
if value:
return value
# Finally use the default value:
return default
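# Hedged sketch of the lookup precedence implemented above: the process
# environment wins over the sysconfig file, which wins over the built-in
# default. ENGINE_EXAMPLE is a hypothetical variable name.
def _exampleGetSysconfig():
    os.environ["ENGINE_EXAMPLE"] = "from-env"
    assert getSysconfig("ENGINE_EXAMPLE", "fallback") == "from-env"
    del os.environ["ENGINE_EXAMPLE"]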
def checkIdentity():
if os.getuid() != 0:
raise Exception("This script should run with the root user.")
def checkOwnership(name, uid=None, gid=None):
# Get the metadata of the file:
st = os.stat(name)
# Check that the file is owned by the given user:
if uid and st[stat.ST_UID] != uid:
user = pwd.getpwuid(uid).pw_name
owner = pwd.getpwuid(st[stat.ST_UID]).pw_name
if os.path.isdir(name):
raise Exception("The directory \"%s\" is not owned by user \"%s\", but by \"%s\"." % (name, user, owner))
else:
raise Exception("The file \"%s\" is not owned by user \"%s\", but by \"%s\"." % (name, user, owner))
# Check that the file is owned by the given group:
if gid and st[stat.ST_GID] != gid:
group = grp.getgrgid(gid).gr_name
owner = grp.getgrgid(st[stat.ST_GID]).gr_name
if os.path.isdir(name):
raise Exception("The directory \"%s\" is not owned by group \"%s\", but by \"%s\"." % (name, group, owner))
else:
raise Exception("The file \"%s\" is not owned by group \"%s\", but by \"%s\"." % (name, group, owner))
def checkDirectory(name, uid=None, gid=None):
if not os.path.isdir(name):
raise Exception("The directory \"%s\" doesn't exist." % name)
checkOwnership(name, uid, gid)
def checkFile(name, uid=None, gid=None):
if not os.path.isfile(name):
raise Exception("The file \"%s\" doesn't exist." % name)
checkOwnership(name, uid, gid)
def checkLog(name):
log = os.path.join(engineLogDir, name)
if os.path.exists(log):
checkOwnership(log, engineUid, engineGid)
def checkInstallation():
# Check the required JBoss directories and files:
checkDirectory(jbossHomeDir)
checkFile(jbossModulesJar)
# Check the required engine directories and files:
checkDirectory(engineEtcDir, uid=engineUid, gid=engineGid)
checkDirectory(engineLogDir, uid=engineUid, gid=engineGid)
checkDirectory(engineUsrDir, uid=0, gid=0)
checkDirectory(engineVarDir, uid=engineUid, gid=engineGid)
checkDirectory(engineLockDir, uid=engineUid, gid=engineGid)
checkDirectory(engineServiceDir, uid=0, gid=0)
checkDirectory(engineContentDir, uid=engineUid, gid=engineGid)
checkDirectory(engineDeploymentsDir, uid=engineUid, gid=engineGid)
checkDirectory(engineTmpDir, uid=engineUid, gid=engineGid)
checkDirectory(engineEarDir, uid=0, gid=0)
checkFile(engineLoggingFile)
checkFile(engineConfigTemplateFile)
# Check that log files are owned by the engine user, if they exist:
checkLog(engineLogFile)
checkLog(engineBootLogFile)
checkLog(engineConsoleLogFile)
checkLog(engineServerLogFile)
# XXX: Add more checks here!
def loadEnginePid():
if not os.path.exists(enginePidFile):
return None
with open(enginePidFile, "r") as enginePidFd:
return int(enginePidFd.read())
def saveEnginePid(pid):
with open(enginePidFile, "w") as enginePidFd:
enginePidFd.write(str(pid) + "\n")
def removeEnginePid():
if os.path.exists(enginePidFile):
os.remove(enginePidFile)
def startEngine():
# Get the PID:
enginePid = loadEnginePid()
if enginePid:
syslog.syslog(syslog.LOG_WARNING, "The engine PID file \"%s\" already exists." % enginePidFile)
return
# Make sure the engine archive directory is linked in the deployments
# directory, if not link it now:
engineEarLink = os.path.join(engineDeploymentsDir, "engine.ear")
if not os.path.islink(engineEarLink):
syslog.syslog(syslog.LOG_INFO, "The symbolic link \"%s\" doesn't exist, will create it now." % engineEarLink)
try:
os.symlink(engineEarDir, engineEarLink)
except:
raise Exception("Can't create symbolic link from \"%s\" to \"%s\"." % (engineEarLink, engineEarDir))
# Remove all existing deployment markers:
for markerFile in glob.glob("%s.*" % engineEarLink):
try:
os.remove(markerFile)
except:
raise Exception("Can't remove deployment marker file \"%s\"." % markerFile)
# Create the new marker file to trigger deployment of the engine:
markerFile = "%s.dodeploy" % engineEarLink
try:
markerFd = open(markerFile, "w")
markerFd.close()
except:
raise Exception("Can't create deployment marker file \"%s\"." % markerFile)
# Generate the main configuration from the template and copy it to the
# configuration directory making sure that the application server will be
# able to write to it:
engineConfigTemplate = Template(file=engineConfigTemplateFile, searchList=[engineSysconfig])
engineConfigText = str(engineConfigTemplate)
with open(engineConfigFile, "w") as engineConfigFd:
engineConfigFd.write(engineConfigText)
os.chown(engineConfigFile, engineUid, engineGid)
# Get heap configuration parameters from the environment or use defaults if
# they are not provided:
engineHeapMin = getSysconfig("ENGINE_HEAP_MIN", "1g")
engineHeapMax = getSysconfig("ENGINE_HEAP_MAX", "1g")
enginePermMin = getSysconfig("ENGINE_PERM_MIN", "256m")
enginePermMax = getSysconfig("ENGINE_PERM_MAX", "256m")
# Module path should include first the engine modules so that they can override
# those provided by the application server if needed:
jbossModulesDir = os.path.join(jbossHomeDir, "modules")
engineModulesDir = os.path.join(engineUsrDir, "modules")
engineModulePath = "%s:%s" % (engineModulesDir, jbossModulesDir)
# We start with an empty list of arguments:
engineArgs = []
# Add arguments for the java virtual machine:
engineArgs.extend([
        # The name of the process, as displayed by ps:
engineName,
# Virtual machine options:
"-server",
"-XX:+UseCompressedOops",
"-XX:+TieredCompilation",
"-Xms%s" % engineHeapMin,
"-Xms%s" % engineHeapMax,
"-XX:PermSize=%s" % enginePermMin,
"-XX:MaxPermSize=%s" % enginePermMax,
"-Djava.net.preferIPv4Stack=true",
"-Dsun.rmi.dgc.client.gcInterval=3600000",
"-Dsun.rmi.dgc.server.gcInterval=3600000",
"-Djava.awt.headless=true",
])
# Add extra system properties provided in the configuration:
engineProperties = getSysconfig("ENGINE_PROPERTIES")
if engineProperties:
for engineProperty in engineProperties.split():
if not engineProperty.startswith("-D"):
engineProperty = "-D" + engineProperty
engineArgs.append(engineProperty)
# Add arguments for remote debugging of the java virtual machine:
engineDebugAddress = getSysconfig("ENGINE_DEBUG_ADDRESS")
if engineDebugAddress:
engineArgs.append("-Xrunjdwp:transport=dt_socket,address=%s,server=y,suspend=n" % engineDebugAddress)
# Enable verbose garbage collection if required:
engineVerboseGC = getSysconfig("ENGINE_VERBOSE_GC", "false").lower()
if engineVerboseGC in [ "t", "true", "y", "yes" ]:
engineArgs.extend([
"-verbose:gc",
"-XX:+PrintGCTimeStamps",
"-XX:+PrintGCDetails",
])
# Add arguments for JBoss:
engineArgs.extend([
"-Djava.util.logging.manager=org.jboss.logmanager",
"-Dlogging.configuration=file://%s" % engineLoggingFile,
"-Dorg.jboss.resolver.warning=true",
"-Djboss.modules.system.pkgs=org.jboss.byteman",
"-Djboss.server.default.config=engine-service",
"-Djboss.home.dir=%s" % jbossHomeDir,
"-Djboss.server.base.dir=%s" % engineUsrDir,
"-Djboss.server.config.dir=%s" % engineTmpDir,
"-Djboss.server.data.dir=%s" % engineVarDir,
"-Djboss.server.log.dir=%s" % engineLogDir,
"-Djboss.server.temp.dir=%s" % engineTmpDir,
"-Djboss.controller.temp.dir=%s" % engineTmpDir,
"-jar", jbossModulesJar,
"-mp", engineModulePath,
"-jaxpmodule", "javax.xml.jaxp-provider",
"org.jboss.as.standalone", "-c", os.path.basename(engineConfigFile),
])
# Fork a new process:
enginePid = os.fork()
# If this is the parent process then the last thing we have to do is
# saving the child process PID to the file:
if enginePid != 0:
syslog.syslog(syslog.LOG_INFO, "Started engine process %d." % enginePid)
saveEnginePid(enginePid)
return
# Change the resource limits while we are root as we won't be
# able to change them once we assume the engine identity:
engineNofile = int(getSysconfig("ENGINE_NOFILE", "65535"))
resource.setrlimit(resource.RLIMIT_NOFILE, (engineNofile, engineNofile))
# This is the child process, first thing we do is assume the engine
# identity:
os.setgid(engineGid)
os.setuid(engineUid)
# Then close standard input and some other security measures:
os.close(0)
os.setsid()
os.chdir("/")
# Then open the console log and redirect standard output and errors to it:
    engineConsoleFd = os.open(engineConsoleLogFile, os.O_CREAT | os.O_WRONLY | os.O_APPEND, 0o660)
os.dup2(engineConsoleFd, 1)
os.dup2(engineConsoleFd, 2)
os.close(engineConsoleFd)
# Prepare a clean environment:
engineEnv = {
"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin",
"LANG": "en_US.UTF-8",
"ENGINE_ETC": engineEtcDir,
"ENGINE_LOG": engineLogDir,
"ENGINE_TMP": engineTmpDir,
"ENGINE_USR": engineUsrDir,
"ENGINE_VAR": engineVarDir,
"ENGINE_LOCK": engineLockDir,
}
# Finally execute the java virtual machine:
os.execvpe("java", engineArgs, engineEnv)
def stopEngine():
# Load the PID:
enginePid = loadEnginePid()
if not enginePid:
syslog.syslog(syslog.LOG_INFO, "The engine PID file \"%s\" doesn't exist." % enginePidFile)
return
# First check that the process exists:
if not os.path.exists("/proc/%d" % enginePid):
syslog.syslog(syslog.LOG_WARNING, "The engine PID file \"%s\" contains %d, but that process doesn't exist, will just remove the file." % (enginePidFile, enginePid))
removeEnginePid()
return
# Get the time to wait for the engine to stop from the configuration:
stopTime = int(getSysconfig("ENGINE_STOP_TIME", "10"))
stopInterval = int(getSysconfig("ENGINE_STOP_INTERVAL", "1"))
    # Kill the process softly and wait for it to disappear or for the timeout
# to expire:
os.kill(enginePid, signal.SIGTERM)
initialTime = time.time()
timeElapsed = 0
while os.path.exists("/proc/%d" % enginePid):
syslog.syslog(syslog.LOG_INFO, "Waiting up to %d seconds for engine process %d to finish." % ((stopTime - timeElapsed), enginePid))
timeElapsed = time.time() - initialTime
if timeElapsed > stopTime:
break
time.sleep(stopInterval)
    # If the process didn't disappear after the allowed time then we forcibly
# kill it:
if os.path.exists("/proc/%d" % enginePid):
syslog.syslog(syslog.LOG_WARNING, "The engine process %d didn't finish after waiting %d seconds, killing it." % (enginePid, timeElapsed))
os.kill(enginePid, signal.SIGKILL)
syslog.syslog(syslog.LOG_WARNING, "Killed engine process %d." % enginePid)
else:
syslog.syslog(syslog.LOG_INFO, "Stopped engine process %d." % enginePid)
# And finally we remove the PID file:
removeEnginePid()
def checkEngine():
# First check that the engine PID file exists:
enginePid = loadEnginePid()
if not enginePid:
raise Exception("The engine PID file \"%s\" doesn't exist." % enginePidFile)
# Now check that the process exists:
if not os.path.exists("/proc/%d" % enginePid):
raise Exception("The engine PID file \"%s\" contains %d, but that process doesn't exist." % (enginePidFile, enginePid))
# XXX: Here we could check deeper the status of the engine sending a
# request to the health status servlet.
syslog.syslog(syslog.LOG_INFO, "Engine process %d is running." % enginePid)
def showUsage():
print("Usage: %s {start|stop|restart|status}" % engineName)
def prettyAction(label, action):
# Determine the colors to use according to the type of terminal:
colorNormal = ""
colorSuccess = ""
colorFailure = ""
moveColumn = ""
if os.getenv("TERM") in ["linux", "xterm"]:
colorNormal = "\033[0;39m"
colorSuccess = "\033[0;32m"
colorFailure = "\033[0;31m"
moveColumn = "\033[60G"
# Inform that we are doing the job:
sys.stdout.write(label + " " + engineName + ":")
sys.stdout.flush()
# Do the real action:
try:
action()
sys.stdout.write(moveColumn + " [ " + colorSuccess + "OK" + colorNormal + " ]\n")
except Exception as exception:
sys.stdout.write(moveColumn + " [" + colorFailure + "FAILED" + colorNormal + "]\n")
raise
def main():
# Open connection to the syslog daemon:
syslog.openlog(engineName, syslog.LOG_PID)
# Check the arguments:
args = sys.argv[1:]
    if len(args) != 1 or args[0] not in [ "start", "stop", "restart", "status" ]:
showUsage()
sys.exit(1)
try:
# Load the configuration:
loadSysconfig()
# Do some important checks:
checkIdentity()
checkInstallation()
# Perform the requested action:
action = args[0].lower()
if action == "start":
prettyAction("Starting", startEngine)
elif action == "stop":
prettyAction("Stopping", stopEngine)
elif action == "restart":
prettyAction("Stopping", stopEngine)
prettyAction("Starting", startEngine)
elif action == "status":
try:
checkEngine()
print("ovirt-engine is running")
except:
print("ovirt-engine is stopped")
raise
except Exception as exception:
#traceback.print_exc()
syslog.syslog(syslog.LOG_ERR, str(exception))
sys.exit(1)
else:
sys.exit(0)
# Close connection to syslog:
syslog.closelog()
if __name__ == "__main__":
main()
|
|
"""
Open compressed files transparently.
"""
__all__ = ["xopen", "PipedGzipWriter", "PipedGzipReader", "__version__"]
import gzip
import sys
import io
import os
import bz2
import time
import stat
import signal
import pathlib
import subprocess
import tempfile
from abc import ABC, abstractmethod
from subprocess import Popen, PIPE, DEVNULL
from typing import Optional, TextIO, AnyStr, IO
from ._version import version as __version__
try:
import lzma
except ImportError:
lzma = None # type: ignore
try:
from isal import igzip, isal_zlib # type: ignore
except ImportError:
igzip = None
isal_zlib = None
try:
import fcntl
# fcntl.F_SETPIPE_SZ will be available in python 3.10.
# https://github.com/python/cpython/pull/21921
# If not available: set it to the correct value for known platforms.
if not hasattr(fcntl, "F_SETPIPE_SZ") and sys.platform == "linux":
setattr(fcntl, "F_SETPIPE_SZ", 1031)
except ImportError:
fcntl = None # type: ignore
_MAX_PIPE_SIZE_PATH = pathlib.Path("/proc/sys/fs/pipe-max-size")
if _MAX_PIPE_SIZE_PATH.exists():
_MAX_PIPE_SIZE = int(_MAX_PIPE_SIZE_PATH.read_text()) # type: Optional[int]
else:
_MAX_PIPE_SIZE = None
try:
from os import fspath # Exists in Python 3.6+
except ImportError:
def fspath(path): # type: ignore
if hasattr(path, "__fspath__"):
return path.__fspath__()
# Python 3.4 and 3.5 have pathlib, but do not support the file system
# path protocol
if pathlib is not None and isinstance(path, pathlib.Path):
return str(path)
if not isinstance(path, str):
raise TypeError("path must be a string")
return path
def _available_cpu_count() -> int:
"""
Number of available virtual or physical CPUs on this system
Adapted from http://stackoverflow.com/a/1006301/715090
"""
try:
return len(os.sched_getaffinity(0))
except AttributeError:
pass
import re
try:
with open('/proc/self/status') as f:
status = f.read()
m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status)
if m:
res = bin(int(m.group(1).replace(',', ''), 16)).count('1')
if res > 0:
return res
except OSError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
return 1
def _set_pipe_size_to_max(fd: int) -> None:
"""
Set pipe size to maximum on platforms that support it.
:param fd: The file descriptor to increase the pipe size for.
"""
if not hasattr(fcntl, "F_SETPIPE_SZ") or not _MAX_PIPE_SIZE:
return
try:
fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, _MAX_PIPE_SIZE) # type: ignore
except OSError:
pass
def _can_read_concatenated_gz(program: str) -> bool:
"""
Check if a concatenated gzip file can be read properly. Not all deflate
programs handle this properly.
"""
fd, temp_path = tempfile.mkstemp(suffix=".gz", prefix="xopen.")
try:
# Create a concatenated gzip file. gzip.compress recreates the contents
# of a gzip file including header and trailer.
with open(temp_path, "wb") as temp_file:
temp_file.write(gzip.compress(b"AB") + gzip.compress(b"CD"))
try:
result = subprocess.run([program, "-c", "-d", temp_path],
check=True, stderr=PIPE, stdout=PIPE)
return result.stdout == b"ABCD"
except subprocess.CalledProcessError:
            # Program can't read the concatenated gzip file
return False
finally:
os.close(fd)
os.remove(temp_path)
class Closing(ABC):
"""
Inherit from this class and implement a close() method to offer context
manager functionality.
"""
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __del__(self):
try:
self.close()
except Exception:
pass
@abstractmethod
def close(self):
pass
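# Hedged usage sketch: a minimal Closing subclass demonstrating the context
# manager behaviour provided above. _ExampleResource is hypothetical and
# only records that close() ran; `with _ExampleResource() as r: ...` sets
# r.is_closed to True on exit.
class _ExampleResource(Closing):
    def __init__(self):
        self.is_closed = False

    def close(self):
        self.is_closed = True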
class PipedCompressionWriter(Closing):
"""
Write Compressed files by running an external process and piping into it.
"""
def __init__(self, path, program: str, mode='wt',
compresslevel: Optional[int] = None,
                 threads_flag: Optional[str] = None,
threads: Optional[int] = None):
"""
mode -- one of 'w', 'wt', 'wb', 'a', 'at', 'ab'
compresslevel -- compression level
threads_flag -- which flag is used to denote the number of threads in the program.
        If set to None, the program will be called without a threads flag.
threads (int) -- number of threads. If this is set to None, a reasonable default is
used. At the moment, this means that the number of available CPU cores is used, capped
at four to avoid creating too many threads. Use 0 to use all available cores.
"""
if mode not in ('w', 'wt', 'wb', 'a', 'at', 'ab'):
raise ValueError(
"Mode is '{}', but it must be 'w', 'wt', 'wb', 'a', 'at' or 'ab'".format(mode))
# TODO use a context manager
self.outfile = open(path, mode)
self.closed = False
self.name = path
self._mode = mode
self._program = program
self._threads_flag = threads_flag
if threads is None:
threads = min(_available_cpu_count(), 4)
self._threads = threads
try:
self.process = self._open_process(
mode, compresslevel, threads, self.outfile)
except OSError:
self.outfile.close()
raise
assert self.process.stdin is not None
_set_pipe_size_to_max(self.process.stdin.fileno())
if 'b' not in mode:
self._file = io.TextIOWrapper(self.process.stdin) # type: IO
else:
self._file = self.process.stdin
def __repr__(self):
return "{}('{}', mode='{}', program='{}', threads={})".format(
self.__class__.__name__,
self.name,
self._mode,
self._program,
self._threads,
)
def _open_process(
self, mode: str, compresslevel: Optional[int], threads: int, outfile: TextIO,
) -> Popen:
program_args = [self._program]
if threads != 0 and self._threads_flag is not None:
program_args += [self._threads_flag, str(threads)]
extra_args = []
if 'w' in mode and compresslevel is not None:
extra_args += ['-' + str(compresslevel)]
kwargs = dict(stdin=PIPE, stdout=outfile, stderr=DEVNULL)
# Setting close_fds to True in the Popen arguments is necessary due to
# <http://bugs.python.org/issue12786>.
# However, close_fds is not supported on Windows. See
# <https://github.com/marcelm/cutadapt/issues/315>.
if sys.platform != 'win32':
kwargs['close_fds'] = True
process = Popen(program_args + extra_args, **kwargs) # type: ignore
return process
def write(self, arg: AnyStr) -> None:
self._file.write(arg)
def close(self) -> None:
if self.closed:
return
self.closed = True
self._file.close()
retcode = self.process.wait()
self.outfile.close()
if retcode != 0:
raise OSError(
"Output {} process terminated with exit code {}".format(self._program, retcode))
def __iter__(self): # type: ignore
# For compatibility with Pandas, which checks for an __iter__ method
# to determine whether an object is file-like.
return self
def __next__(self):
raise io.UnsupportedOperation('not readable')
class PipedCompressionReader(Closing):
"""
Open a pipe to a process for reading a compressed file.
"""
def __init__(
self,
path,
program: str,
mode: str = "r",
threads_flag: Optional[str] = None,
threads: Optional[int] = None,
):
"""
        Raise an OSError when the program could not be found.
"""
if mode not in ('r', 'rt', 'rb'):
raise ValueError("Mode is '{}', but it must be 'r', 'rt' or 'rb'".format(mode))
self._program = program
program_args = [program, '-cd', path]
if threads_flag is not None:
if threads is None:
# Single threaded behaviour by default because:
# - Using a single thread to read a file is the least unexpected
# behaviour. (For users of xopen, who do not know which backend is used.)
# - There is quite a substantial overhead (+25% CPU time) when
# using multiple threads while there is only a 10% gain in wall
# clock time.
threads = 1
program_args += [threads_flag, str(threads)]
self._threads = threads
self.process = Popen(program_args, stdout=PIPE, stderr=PIPE)
self.name = path
assert self.process.stdout is not None
_set_pipe_size_to_max(self.process.stdout.fileno())
self._mode = mode
if 'b' not in mode:
self._file = io.TextIOWrapper(self.process.stdout) # type: IO
else:
self._file = self.process.stdout
assert self.process.stderr is not None
self._stderr = io.TextIOWrapper(self.process.stderr)
self.closed = False
# Give the subprocess a little bit of time to report any errors (such as
# a non-existing file)
time.sleep(0.01)
self._raise_if_error()
def __repr__(self):
return "{}('{}', mode='{}', program='{}', threads={})".format(
self.__class__.__name__,
self.name,
self._mode,
self._program,
self._threads,
)
def close(self) -> None:
if self.closed:
return
self.closed = True
retcode = self.process.poll()
if retcode is None:
# still running
self.process.terminate()
allow_sigterm = True
else:
allow_sigterm = False
self.process.wait()
self._file.close()
self._raise_if_error(allow_sigterm=allow_sigterm)
self._stderr.close()
def __iter__(self):
return self
def __next__(self) -> AnyStr:
return self._file.__next__()
def _raise_if_error(self, allow_sigterm: bool = False) -> None:
"""
Raise IOError if process is not running anymore and the exit code is
nonzero. If allow_sigterm is set and a SIGTERM exit code is
encountered, no error is raised.
"""
retcode = self.process.poll()
if (
retcode is not None and retcode != 0
and not (allow_sigterm and retcode == -signal.SIGTERM)
):
message = self._stderr.read().strip()
self._file.close()
self._stderr.close()
raise OSError("{} (exit code {})".format(message, retcode))
def read(self, *args) -> AnyStr:
return self._file.read(*args)
def readinto(self, *args):
return self._file.readinto(*args)
def readline(self, *args) -> AnyStr:
return self._file.readline(*args)
def seekable(self) -> bool:
return self._file.seekable()
    def peek(self, n: Optional[int] = None):
return self._file.peek(n) # type: ignore
def readable(self) -> bool:
return self._file.readable()
def writable(self) -> bool:
return self._file.writable()
def flush(self) -> None:
return None
class PipedGzipReader(PipedCompressionReader):
"""
Open a pipe to pigz for reading a gzipped file. Even though pigz is mostly
used to speed up writing by using many compression threads, it is
also faster when reading, even when forced to use a single thread
(ca. 2x speedup).
"""
def __init__(self, path, mode: str = "r", threads: Optional[int] = None):
try:
super().__init__(path, "pigz", mode, "-p", threads)
except OSError:
super().__init__(path, "gzip", mode, None, threads)
class PipedGzipWriter(PipedCompressionWriter):
"""
Write gzip-compressed files by running an external gzip or pigz process and
piping into it. pigz is tried first. It is fast because it can compress using
    multiple cores. It is also more efficient on a single core.
If pigz is not available, a gzip subprocess is used. On Python 3, gzip.GzipFile is on
par with gzip itself, but running an external gzip can still reduce wall-clock
time because the compression happens in a separate process.
"""
def __init__(
self,
path,
mode: str = "wt",
compresslevel: Optional[int] = None,
threads: Optional[int] = None,
):
"""
mode -- one of 'w', 'wt', 'wb', 'a', 'at', 'ab'
compresslevel -- compression level
threads (int) -- number of pigz threads. If this is set to None, a reasonable default is
used. At the moment, this means that the number of available CPU cores is used, capped
at four to avoid creating too many threads. Use 0 to let pigz use all available cores.
"""
if compresslevel is not None and compresslevel not in range(1, 10):
raise ValueError("compresslevel must be between 1 and 9")
try:
super().__init__(path, "pigz", mode, compresslevel, "-p", threads)
except OSError:
super().__init__(path, "gzip", mode, compresslevel, None, threads)
class PipedIGzipReader(PipedCompressionReader):
"""
    Uses igzip for reading a gzipped file. This is much faster than either
gzip or pigz which were written to run on a wide array of systems. igzip
can only run on x86 and ARM architectures, but is able to use more
architecture-specific optimizations as a result.
"""
def __init__(self, path, mode: str = "r"):
if not _can_read_concatenated_gz("igzip"):
# Instead of elaborate version string checking once the problem is
# fixed, it is much easier to use this, "proof in the pudding" type
# of evaluation.
raise ValueError(
"This version of igzip does not support reading "
"concatenated gzip files and is therefore not "
"safe to use. See: https://github.com/intel/isa-l/issues/143")
super().__init__(path, "igzip", mode)
class PipedIGzipWriter(PipedCompressionWriter):
"""
Uses igzip for writing a gzipped file. This is much faster than either
gzip or pigz which were written to run on a wide array of systems. igzip
can only run on x86 and ARM architectures, but is able to use more
architecture-specific optimizations as a result.
    Threads are supported by a flag, but do not add any speed. Also, on some
    distro versions (the isal package in Debian Buster) the thread flag is not
    present. For these reasons threads are omitted from the interface.
Only compresslevel 0-3 are supported and these output slightly different
filesizes from their pigz/gzip counterparts.
See: https://gist.github.com/rhpvorderman/4f1201c3f39518ff28dde45409eb696b
"""
def __init__(self, path, mode: str = "wt", compresslevel: Optional[int] = None):
if compresslevel is not None and compresslevel not in range(0, 4):
raise ValueError("compresslevel must be between 0 and 3")
super().__init__(path, "igzip", mode, compresslevel)
def _open_stdin_or_out(mode: str) -> IO:
# Do not return sys.stdin or sys.stdout directly as we want the returned object
# to be closable without closing sys.stdout.
std = dict(r=sys.stdin, w=sys.stdout)[mode[0]]
return open(std.fileno(), mode=mode, closefd=False)
def _open_bz2(filename, mode: str) -> IO:
return bz2.open(filename, mode)
def _open_xz(filename, mode: str) -> IO:
if lzma is None:
raise ImportError(
"Cannot open xz files: The lzma module is not available (use Python 3.3 or newer)")
return lzma.open(filename, mode)
def _open_gz_external(filename, mode, compresslevel, threads):
if 'r' in mode:
try:
return PipedIGzipReader(filename, mode)
except (OSError, ValueError):
# No igzip installed or version does not support reading
# concatenated files.
return PipedGzipReader(filename, mode, threads=threads)
else:
try:
return PipedIGzipWriter(filename, mode, compresslevel)
except (OSError, ValueError):
# No igzip installed or compression level higher than 3
return PipedGzipWriter(filename, mode, compresslevel,
threads=threads)
def _open_gz(filename, mode: str, compresslevel, threads):
if threads != 0:
try:
return _open_gz_external(filename, mode, compresslevel, threads)
except OSError:
pass # We try without threads.
if 'r' in mode:
if igzip is not None:
return igzip.open(filename, mode)
return gzip.open(filename, mode)
if igzip is not None:
try:
return igzip.open(filename, mode,
compresslevel=isal_zlib.ISAL_DEFAULT_COMPRESSION
if compresslevel is None else compresslevel)
except ValueError:
# Compression level not supported, move to built-in gzip.
pass
# Override gzip.open's default of 9 for consistency with command-line gzip.
return gzip.open(filename, mode,
compresslevel=6 if compresslevel is None else compresslevel)
def _detect_format_from_content(filename: str) -> Optional[str]:
"""
Attempts to detect file format from the content by reading the first
6 bytes. Returns None if no format could be detected.
"""
try:
if stat.S_ISREG(os.stat(filename).st_mode):
with open(filename, "rb") as fh:
bs = fh.read(6)
if bs[:2] == b'\x1f\x8b':
# https://tools.ietf.org/html/rfc1952#page-6
return "gz"
elif bs[:3] == b'\x42\x5a\x68':
# https://en.wikipedia.org/wiki/List_of_file_signatures
return "bz2"
elif bs[:6] == b'\xfd\x37\x7a\x58\x5a\x00':
# https://tukaani.org/xz/xz-file-format.txt
return "xz"
except OSError:
pass
return None
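# A minimal illustration (not part of the module API): the gzip magic bytes
# are 0x1f 0x8b, so a freshly written gzip file is detected as "gz"
# regardless of its file extension. The path below is hypothetical.
#
# with gzip.open("/tmp/nameless", "wb") as f:
#     f.write(b"x")
# assert _detect_format_from_content("/tmp/nameless") == "gz"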
def _detect_format_from_extension(filename: str) -> Optional[str]:
"""
Attempts to detect file format from the filename extension.
Returns None if no format could be detected.
"""
if filename.endswith('.bz2'):
return "bz2"
elif filename.endswith('.xz'):
return "xz"
elif filename.endswith('.gz'):
return "gz"
else:
return None
def xopen(
filename,
mode: str = "r",
compresslevel: Optional[int] = None,
threads: Optional[int] = None,
) -> IO:
"""
A replacement for the "open" function that can also read and write
compressed files transparently. The supported compression formats are gzip,
bzip2 and xz. If the filename is '-', standard output (mode 'w') or
standard input (mode 'r') is returned.
The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2, .xz is
xz/lzma and no compression assumed otherwise.
mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. The 't' can also be omitted,
so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used.
Append mode ('a', 'at', 'ab') is not available with BZ2 compression and
will raise an error.
compresslevel is the compression level for writing to gzip files.
This parameter is ignored for the other compression formats. If set to
None (default), level 6 is used.
threads only has a meaning when reading or writing gzip files.
When threads is None (the default), reading or writing a gzip file is done with a pigz
(parallel gzip) subprocess if possible. See PipedGzipWriter and PipedGzipReader.
When threads = 0, no subprocess is used.
"""
if mode in ('r', 'w', 'a'):
mode += 't'
if mode not in ('rt', 'rb', 'wt', 'wb', 'at', 'ab'):
raise ValueError("Mode '{}' not supported".format(mode))
filename = fspath(filename)
if filename == '-':
return _open_stdin_or_out(mode)
detected_format = _detect_format_from_extension(filename)
if detected_format is None and "w" not in mode:
detected_format = _detect_format_from_content(filename)
if detected_format == "gz":
return _open_gz(filename, mode, compresslevel, threads)
elif detected_format == "xz":
return _open_xz(filename, mode)
elif detected_format == "bz2":
return _open_bz2(filename, mode)
else:
return open(filename, mode)
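# A minimal usage sketch (illustrative, not part of the module): the same
# call transparently handles plain, gzip, bzip2 and xz files based on the
# extension, and '-' maps to stdin/stdout. threads=0 forces the built-in
# gzip module instead of a pigz/igzip subprocess.
#
# with xopen("data.txt.gz", "wt", threads=0) as fh:
#     fh.write("hello\n")
# with xopen("data.txt.gz", "rt", threads=0) as fh:
#     assert fh.read() == "hello\n"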
|
|
"""The tests for the Rfxtrx sensor platform."""
import unittest
import pytest
from homeassistant.components import rfxtrx as rfxtrx_core
from homeassistant.const import TEMP_CELSIUS
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant, mock_component
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestSensorRfxtrx(unittest.TestCase):
"""Test the Rfxtrx sensor platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, "rfxtrx")
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test with 0 sensor."""
assert setup_component(
self.hass, "sensor", {"sensor": {"platform": "rfxtrx", "devices": {}}}
)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
def test_old_config_sensor(self):
"""Test with 1 sensor."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "rfxtrx",
"devices": {
"sensor_0502": {
"name": "Test",
"packetid": "0a52080705020095220269",
"data_type": "Temperature",
}
},
}
},
)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
entity = rfxtrx_core.RFX_DEVICES["sensor_0502"]["Temperature"]
assert "Test" == entity.name
assert TEMP_CELSIUS == entity.unit_of_measurement
assert entity.state is None
def test_one_sensor(self):
"""Test with 1 sensor."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "rfxtrx",
"devices": {
"0a52080705020095220269": {
"name": "Test",
"data_type": "Temperature",
}
},
}
},
)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
entity = rfxtrx_core.RFX_DEVICES["sensor_0502"]["Temperature"]
assert "Test" == entity.name
assert TEMP_CELSIUS == entity.unit_of_measurement
assert entity.state is None
def test_one_sensor_no_datatype(self):
"""Test with 1 sensor."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "rfxtrx",
"devices": {"0a52080705020095220269": {"name": "Test"}},
}
},
)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
entity = rfxtrx_core.RFX_DEVICES["sensor_0502"]["Temperature"]
assert "Test" == entity.name
assert TEMP_CELSIUS == entity.unit_of_measurement
assert entity.state is None
entity_id = rfxtrx_core.RFX_DEVICES["sensor_0502"]["Temperature"].entity_id
entity = self.hass.states.get(entity_id)
assert "Test" == entity.name
assert "unknown" == entity.state
def test_several_sensors(self):
"""Test with 3 sensors."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "rfxtrx",
"devices": {
"0a52080705020095220269": {
"name": "Test",
"data_type": "Temperature",
},
"0a520802060100ff0e0269": {
"name": "Bath",
"data_type": ["Temperature", "Humidity"],
},
},
}
},
)
assert 2 == len(rfxtrx_core.RFX_DEVICES)
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == "sensor_0601":
device_num = device_num + 1
assert len(rfxtrx_core.RFX_DEVICES[id]) == 2
_entity_temp = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
_entity_hum = rfxtrx_core.RFX_DEVICES[id]["Humidity"]
assert "%" == _entity_hum.unit_of_measurement
assert "Bath" == _entity_hum.__str__()
assert _entity_hum.state is None
assert TEMP_CELSIUS == _entity_temp.unit_of_measurement
assert "Bath" == _entity_temp.__str__()
elif id == "sensor_0502":
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
assert entity.state is None
assert TEMP_CELSIUS == entity.unit_of_measurement
assert "Test" == entity.__str__()
assert 2 == device_num
def test_discover_sensor(self):
"""Test with discovery of sensor."""
assert setup_component(
self.hass,
"sensor",
{"sensor": {"platform": "rfxtrx", "automatic_add": True, "devices": {}}},
)
event = rfxtrx_core.get_rfx_object("0a520801070100b81b0279")
event.data = bytearray(b"\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES["sensor_0701"]["Temperature"]
assert 1 == len(rfxtrx_core.RFX_DEVICES)
assert {
"Humidity status": "normal",
"Temperature": 18.4,
"Rssi numeric": 7,
"Humidity": 27,
"Battery numeric": 9,
"Humidity status numeric": 2,
} == entity.device_state_attributes
assert "0a520801070100b81b0279" == entity.__str__()
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
event = rfxtrx_core.get_rfx_object("0a52080405020095240279")
event.data = bytearray(b"\nR\x08\x04\x05\x02\x00\x95$\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES["sensor_0502"]["Temperature"]
assert 2 == len(rfxtrx_core.RFX_DEVICES)
assert {
"Humidity status": "normal",
"Temperature": 14.9,
"Rssi numeric": 7,
"Humidity": 36,
"Battery numeric": 9,
"Humidity status numeric": 2,
} == entity.device_state_attributes
assert "0a52080405020095240279" == entity.__str__()
event = rfxtrx_core.get_rfx_object("0a52085e070100b31b0279")
event.data = bytearray(b"\nR\x08^\x07\x01\x00\xb3\x1b\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES["sensor_0701"]["Temperature"]
assert 2 == len(rfxtrx_core.RFX_DEVICES)
assert {
"Humidity status": "normal",
"Temperature": 17.9,
"Rssi numeric": 7,
"Humidity": 27,
"Battery numeric": 9,
"Humidity status numeric": 2,
} == entity.device_state_attributes
assert "0a520801070100b81b0279" == entity.__str__()
# trying to add a switch
event = rfxtrx_core.get_rfx_object("0b1100cd0213c7f210010f70")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 2 == len(rfxtrx_core.RFX_DEVICES)
def test_discover_sensor_noautoadd(self):
"""Test with discover of sensor when auto add is False."""
assert setup_component(
self.hass,
"sensor",
{"sensor": {"platform": "rfxtrx", "automatic_add": False, "devices": {}}},
)
event = rfxtrx_core.get_rfx_object("0a520801070100b81b0279")
event.data = bytearray(b"\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y")
assert 0 == len(rfxtrx_core.RFX_DEVICES)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
event = rfxtrx_core.get_rfx_object("0a52080405020095240279")
event.data = bytearray(b"\nR\x08\x04\x05\x02\x00\x95$\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
event = rfxtrx_core.get_rfx_object("0a52085e070100b31b0279")
event.data = bytearray(b"\nR\x08^\x07\x01\x00\xb3\x1b\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
def test_update_of_sensors(self):
"""Test with 3 sensors."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "rfxtrx",
"devices": {
"0a52080705020095220269": {
"name": "Test",
"data_type": "Temperature",
},
"0a520802060100ff0e0269": {
"name": "Bath",
"data_type": ["Temperature", "Humidity"],
},
},
}
},
)
assert 2 == len(rfxtrx_core.RFX_DEVICES)
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == "sensor_0601":
device_num = device_num + 1
assert len(rfxtrx_core.RFX_DEVICES[id]) == 2
_entity_temp = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
_entity_hum = rfxtrx_core.RFX_DEVICES[id]["Humidity"]
assert "%" == _entity_hum.unit_of_measurement
assert "Bath" == _entity_hum.__str__()
assert _entity_temp.state is None
assert TEMP_CELSIUS == _entity_temp.unit_of_measurement
assert "Bath" == _entity_temp.__str__()
elif id == "sensor_0502":
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
assert entity.state is None
assert TEMP_CELSIUS == entity.unit_of_measurement
assert "Test" == entity.__str__()
assert 2 == device_num
event = rfxtrx_core.get_rfx_object("0a520802060101ff0f0269")
event.data = bytearray(b"\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
event = rfxtrx_core.get_rfx_object("0a52080705020085220269")
event.data = bytearray(b"\nR\x08\x04\x05\x02\x00\x95$\x02y")
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
assert 2 == len(rfxtrx_core.RFX_DEVICES)
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == "sensor_0601":
device_num = device_num + 1
assert len(rfxtrx_core.RFX_DEVICES[id]) == 2
_entity_temp = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
_entity_hum = rfxtrx_core.RFX_DEVICES[id]["Humidity"]
assert "%" == _entity_hum.unit_of_measurement
assert 15 == _entity_hum.state
assert {
"Battery numeric": 9,
"Temperature": 51.1,
"Humidity": 15,
"Humidity status": "normal",
"Humidity status numeric": 2,
"Rssi numeric": 6,
} == _entity_hum.device_state_attributes
assert "Bath" == _entity_hum.__str__()
assert TEMP_CELSIUS == _entity_temp.unit_of_measurement
assert 51.1 == _entity_temp.state
assert {
"Battery numeric": 9,
"Temperature": 51.1,
"Humidity": 15,
"Humidity status": "normal",
"Humidity status numeric": 2,
"Rssi numeric": 6,
} == _entity_temp.device_state_attributes
assert "Bath" == _entity_temp.__str__()
elif id == "sensor_0502":
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]["Temperature"]
assert TEMP_CELSIUS == entity.unit_of_measurement
assert 13.3 == entity.state
assert {
"Humidity status": "normal",
"Temperature": 13.3,
"Rssi numeric": 6,
"Humidity": 34,
"Battery numeric": 9,
"Humidity status numeric": 2,
} == entity.device_state_attributes
assert "Test" == entity.__str__()
assert 2 == device_num
assert 2 == len(rfxtrx_core.RFX_DEVICES)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import copy
import yaml
import numpy as np
from fermipy import utils
def get_function_par_names(name):
"""Get the list of parameters associated with a function.
Parameters
----------
name : str
Name of the function.
"""
fn_spec = get_function_spec(name)
return copy.deepcopy(fn_spec['par_names'])
def get_function_norm_par_name(name):
"""Get the normalization parameter associated with a function.
Parameters
----------
name : str
Name of the function.
"""
fn_spec = get_function_spec(name)
return fn_spec['norm_par']
def get_function_defaults(name):
fn_spec = get_function_spec(name)
return copy.deepcopy(fn_spec['defaults'])
def get_function_spec(name):
"""Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
defaults : dict
Parameter defaults dictionary.
"""
if not hasattr(get_function_spec, 'fndict'):
modelfile = os.path.join('$FERMIPY_ROOT',
'data', 'models.yaml')
modelfile = os.path.expandvars(modelfile)
# use safe_load and close the file handle when done
with open(modelfile) as f:
get_function_spec.fndict = yaml.safe_load(f)
if name not in get_function_spec.fndict:
raise Exception('Invalid Function Name: %s' % name)
return get_function_spec.fndict[name]
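# Illustrative usage sketch (assumes $FERMIPY_ROOT is set and that
# 'PowerLaw' is defined in data/models.yaml, as in standard fermipy
# installs):
#
# spec = get_function_spec('PowerLaw')
# print(spec['norm_par'])   # e.g. 'Prefactor'
# print(spec['par_names'])  # e.g. ['Prefactor', 'Index', 'Scale']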
def get_source_type(spatial_type):
"""Translate a spatial type string to a source type."""
if spatial_type == 'SkyDirFunction':
return 'PointSource'
else:
return 'DiffuseSource'
def get_spatial_type(spatial_model):
"""Translate a spatial model string to a spatial type."""
if spatial_model in ['SkyDirFunction', 'PointSource',
'Gaussian']:
return 'SkyDirFunction'
elif spatial_model in ['SpatialMap']:
return 'SpatialMap'
elif spatial_model in ['RadialGaussian', 'RadialDisk']:
try:
import pyLikelihood
if hasattr(pyLikelihood, 'RadialGaussian'):
return spatial_model
else:
return 'SpatialMap'
except Exception:
return spatial_model
else:
return spatial_model
def extract_pars_from_dict(name, src_dict):
par_names = get_function_par_names(name)
o = {}
for k in par_names:
o[k] = {}
if k not in src_dict:
continue
v = src_dict.pop(k)
if isinstance(v, dict):
o[k] = v.copy()
else:
o[k] = {'name': k, 'value': v}
return o
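# Illustrative sketch: scalar entries matching the function's parameter
# names are wrapped into parameter dicts and popped from the source dict;
# unrelated keys are left in place. 'PowerLaw' and its parameter names are
# assumed to be defined in models.yaml.
#
# src = {'Prefactor': 1e-11, 'something_else': True}
# pars = extract_pars_from_dict('PowerLaw', src)
# # pars['Prefactor'] == {'name': 'Prefactor', 'value': 1e-11}
# # 'Prefactor' is no longer in src; 'something_else' still is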
def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False):
"""Create a dictionary for the parameters of a function.
Parameters
----------
name : str
Name of the function.
pars_dict : dict
Existing parameter dict that will be merged with the
default dictionary created by this method.
rescale : bool
Rescale parameter values.
"""
o = get_function_defaults(name)
pars_dict = pars_dict.copy()
for k in o.keys():
if k not in pars_dict:
continue
v = pars_dict[k]
if not isinstance(v, dict):
v = {'name': k, 'value': v}
o[k].update(v)
kw = dict(update_bounds=update_bounds,
rescale=rescale)
if 'min' in v or 'max' in v:
kw['update_bounds'] = False
if 'scale' in v:
kw['rescale'] = False
o[k] = make_parameter_dict(o[k], **kw)
return o
def make_parameter_dict(pdict, fixed_par=False, rescale=True,
update_bounds=False):
"""
Update a parameter dictionary. This function will automatically
set the parameter scale and bounds if they are not defined.
Bounds are also adjusted to ensure that they encompass the
parameter value.
"""
o = copy.deepcopy(pdict)
o.setdefault('scale', 1.0)
if rescale:
value, scale = utils.scale_parameter(o['value'] * o['scale'])
o['value'] = np.abs(value) * np.sign(o['value'])
o['scale'] = np.abs(scale) * np.sign(o['scale'])
if 'error' in o:
o['error'] /= np.abs(scale)
if update_bounds:
o['min'] = o['value'] * 1E-3
o['max'] = o['value'] * 1E3
if fixed_par:
o['min'] = o['value']
o['max'] = o['value']
if float(o['min']) > float(o['value']):
o['min'] = o['value']
if float(o['max']) < float(o['value']):
o['max'] = o['value']
return o
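# Illustrative sketch of the bounds adjustment (not part of fermipy's
# docs): a value outside [min, max] pulls the nearest bound onto the value.
#
# pdict = {'value': 2.0, 'min': 3.0, 'max': 10.0}
# out = make_parameter_dict(pdict, rescale=False)
# # out['min'] == 2.0 because the lower bound was widened to the value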
def cast_pars_dict(pars_dict):
"""Cast the bool and float elements of a parameters dict to
the appropriate python types.
"""
o = {}
for pname, pdict in pars_dict.items():
o[pname] = {}
for k, v in pdict.items():
if k == 'free':
o[pname][k] = bool(int(v))
elif k == 'name':
o[pname][k] = v
else:
o[pname][k] = float(v)
return o
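# Illustrative sketch: string values (e.g. from XML attributes) are cast
# back to python types; 'free' flags become bools, numbers become floats.
#
# pars = {'Index': {'free': '1', 'value': '2.0', 'name': 'Index'}}
# cast_pars_dict(pars)
# # -> {'Index': {'free': True, 'value': 2.0, 'name': 'Index'}}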
def pars_dict_to_vectors(function_name, pars_dict):
o = {'param_names': np.zeros(10, dtype='S32'),
'param_values': np.empty(10, dtype=float) * np.nan,
'param_errors': np.empty(10, dtype=float) * np.nan,
}
par_names = get_function_par_names(function_name)
for i, p in enumerate(par_names):
value = pars_dict[p]['value'] * pars_dict[p]['scale']
error = pars_dict[p]['error'] * pars_dict[p]['scale']
o['param_names'][i] = p
o['param_values'][i] = value
o['param_errors'][i] = error
return o
|
|
import PDB, Common, string, sys, os
def addResidueToResidueList(curr_residue_list, curr_res_num, curr_res_name, curr_atom_list):
if Common.debug:
print "Adding atoms to residue '%s' %s: %s" % ( \
curr_res_name,
curr_res_num,
map(lambda x: x.getName(), curr_atom_list))
r = PDB.Residue(number = curr_res_num,
name = curr_res_name,
atoms = curr_atom_list,
chain = None)
## store the chain to the residue accumulation list
curr_residue_list.append(r)
## if Common.debug: print "Current residue list is", map(lambda x: (x.getName(), x.getNumber()), curr_residue_list)
## store the back-pointer for each atom to its residue
for atom in curr_atom_list:
atom.setResidue(r)
def addChainToChainList(chains, curr_chain, curr_residue_list):
chains[curr_chain] = PDB.Chain(curr_chain, curr_residue_list)
## save backpointers from each residue to its chain
for residue in curr_residue_list:
residue.setChain(chains[curr_chain])
class PDBParser:
def __init__(self, handle):
self._handle = handle
def parse(self):
extra_records = []
## data structures that are saved after parsing
models = {}
seqres_chains = {}
## running data structures used to accumulate per-chain, per-residue, and per-atom data
curr_seqres_chain = None
chains = {}
curr_model_number = None
curr_res_num = None
curr_res_name = None
curr_atom_list = []
curr_residue_list = []
curr_chain = None
chain_ter_seen = {}
while 1:
line = self._handle.readline()
if Common.debug: print "PDBParser line: %s" % line,
## end of file
if line == '':
if Common.debug:
print "PDBParser end of file"
print "curr_atom_list is", curr_atom_list
print "curr_residue_list is", curr_residue_list
print "curr_chain is", curr_chain
if curr_atom_list != [] and curr_res_name not in Common.residue_skip_list:
addResidueToResidueList(curr_residue_list, curr_res_num, curr_res_name, curr_atom_list)
if curr_residue_list != [] and curr_res_name not in Common.residue_skip_list:
addChainToChainList(chains, curr_chain, curr_residue_list)
## in a single-model file without MODEL and ENDMDL records, we must set this:
if curr_model_number is None:
if Common.debug: print "PDBParser end of file, autosetting model 1"
models[1] = PDB.Model(chains)
break
## if we see a new model record, record its number
elif line[:5] == 'MODEL':
## the official format:
## curr_model_number = int(line[10:14])
## fix/hack due to TAB stored in pdbstyle files:
curr_model_number = int(string.split(line)[1])
if Common.debug: print "PDBParse: model entry", curr_model_number
continue
## at the end of a new model, clear away state data.
## The TER record before ENDMDL would have dealt with adding the model to the chain
elif line[:6] == 'ENDMDL':
if Common.debug: print "PDBParse end of model entry", curr_model_number
models[curr_model_number] = PDB.Model(chains)
## reset the chain_ter_seen for the new model
chains = {}
curr_res_num = None
curr_res_name = None
curr_atom_list = []
curr_residue_list = []
curr_chain = None
chain_ter_seen = {}
continue
elif line[:6] == 'SEQRES':
chain_id = line[11]
chain_data = string.split(line[19:70])
if Common.debug: print "PDBParser SEQRES:", chain_id, chain_data
if Common.debug: print "SEQRES", chain_id, chain_data
if curr_seqres_chain is None or curr_seqres_chain != chain_id:
if Common.debug: print "Setup new chain", chain_id
curr_seqres_chain = chain_id
seqres_chains[curr_seqres_chain] = ""
if Common.debug: print "Store SEQRES chain '%s' (%s)" % (chain_id, chain_data)
for res in chain_data:
seqres_chains[curr_seqres_chain] += Common.three_to_one(res)
elif line[:4] == 'ATOM' or line[:6] == 'HETATM':
atom_num = int(line[6:11])
atom_name = string.strip(line[12:16])
alternate = line[16]
res_name = string.strip(line[17:20])
chain_id = line[21]
res_num = string.strip(line[22:27])
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
try:
occupancy = float(line[55:60])
except ValueError:
occupancy = 0.0
try:
bfactor = float(line[60:66])
except ValueError:
bfactor = 0.0
try:
element = line[77]
except IndexError:
element = ' '
## if Common.debug: print "PDBParser ATOM:", atom_num, atom_name, res_name, chain_id, res_num, x, y, z, occupancy, bfactor, element
## if we've seen a TER record for this chain already, ignore any further entries with that
## chain_id, because these are just associated ions which will not contribute to the sequence
## this is broken if people re-use chain IDs, but that would be... stupid!
if chain_ter_seen.has_key(chain_id):
continue
## new chain; store previously accumulated residues
if curr_chain != chain_id and curr_res_name not in Common.residue_skip_list:
if Common.debug: print "New chain", chain_id, "curr chain", curr_chain
if curr_chain is not None:
## build a new chain object
## store all the accumulated residues into this chain
if curr_atom_list is not None and curr_res_num is not None and curr_res_name is not None:
addResidueToResidueList(curr_residue_list, curr_res_num, curr_res_name, curr_atom_list)
if Common.debug: print "Adding residues to chain %s: %s" % (curr_chain, map(lambda x: (x.getName(), x.getNumber()), curr_residue_list))
addChainToChainList(chains, curr_chain, curr_residue_list)
## store the new current chain
curr_chain = chain_id
## clear the accumulated residue
curr_residue_list = []
## clear the current residue number
curr_res_num = None
curr_res_name = None
## new residue; store the previously accumulated atoms
if curr_res_num != res_num:
## finished parsing a residue, so build a new
## residue object and store all the accumulated
## atoms into this residue
if curr_res_num is not None and curr_res_name not in Common.residue_skip_list:
addResidueToResidueList(curr_residue_list, curr_res_num, curr_res_name, curr_atom_list)
if Common.debug: print "New residue", res_num
## remember the new current residue
curr_res_num = res_num
curr_res_name = res_name
## clear the accumulated atom list
curr_atom_list = []
## build a new atom object
if Common.debug: print "New atom: '%s', '%s', '%s', '%s', '%s'" % (atom_num, atom_name, alternate, res_name, res_num)
isHetatm = 0
if line[:6] == 'HETATM':
isHetatm = 1
atom = PDB.Atom(serial = atom_num,
name = atom_name,
alternate = alternate,
residue = None,
coords = (x,y,z),
occupancy = occupancy,
bfactor = bfactor,
element = element,
isHetatm = isHetatm)
curr_atom_list.append(atom)
## when we see a TER record, save the chain it came from,
## because some PDB files put ions 'associated' with the chain
## after the TER record:
## ATOM 3660 OXT GLN B 502 129.488 87.534 67.598 1.00168.80 O
## TER 3661 GLN B 502
## HETATM 3662 S SO4 B 1 99.307 73.882 58.307 1.00 63.92 S
elif line[:3] == 'TER':
if Common.debug: print "PDBParser: TER on chain", chain_id
try:
chain_id = line[21]
chain_ter_seen[chain_id] = 1
except IndexError:
chain_ter_seen[curr_chain] = 1
addResidueToResidueList(curr_residue_list, curr_res_num, curr_res_name, curr_atom_list)
addChainToChainList(chains, curr_chain, curr_residue_list)
curr_res_num = None
curr_res_name = None
curr_atom_list = []
curr_residue_list = []
elif line[:4] == 'END ' or line[:6] == 'CONECT':
pass
else:
extra_records.append(line)
## Build the sequences after everything else is done
## (I did this in case the SEQRES came after ATOM records, even though that's illegal)
for chain_id in chains.keys():
## store the per-chain seqres data in the chain
if seqres_chains.has_key(chain_id):
if Common.debug: print "Storing SEQRES chain '%s' (%s)" % (chain_id, seqres_chains[chain_id])
chains[chain_id].setSeqres(seqres_chains[chain_id])
return PDB.PDB(models, extra_records)
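## Illustrative usage sketch (the file path is hypothetical; the accessor
## methods are those used by main() below):
##
## p = PDBParser(open("example.pdb")).parse()
## m = p.getModel(p.getModels().keys()[0])
## for chain_id in m.getChains().keys():
##     print chain_id, m.getChain(chain_id).getSequence()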
def main():
## Common.debug = 1
import getopt
pdbFile = None
chain = None
seqres = 0
outputPrefix = "PDB"
output_fasta = 0
output_pdb = 0
try:
opts, args = getopt.getopt(sys.argv[1:], "p:c:o:sfd",["pdb=", "chain=","outputPrefix=","seqres","fasta","outputPdb"])
except getopt.GetoptError, what:
raise RuntimeError, "usage"
for o, a in opts:
if o in ('-p', '--pdb'):
pdbFile = a
if o in ('-c', '--chain'):
chain = a
if o in ('-o', '--outputPrefix'):
outputPrefix = a
if o in ('-s', '--seqres'):
seqres = 1
if o in ('-f', '--fasta'):
output_fasta = 1
if o in ('-d', '--outputPdb'):
output_pdb = 1
if not pdbFile or not chain:
raise RuntimeError, "missing pdbfile or chain"
f = open(pdbFile)
p = PDBParser(f).parse()
ms = p.getModels().keys()
ms.sort()
m = p.getModel(ms[0])
if output_pdb:
pdb_output = open("%s.pdb" % outputPrefix, "w")
if chain == "-":
p.formatPDB(pdb_output, None, not seqres)
else:
p.formatPDB(pdb_output, [chain], not seqres)
if output_fasta:
fasta_output = open("%s.fa" % outputPrefix, "w")
if chain == '-':
fasta_output.write(">%s all chains\n" % (os.path.basename(pdbFile)))
cs = m.getChains().keys()
for chain in cs:
c = m.getChain(chain)
if seqres:
seq = c.getSeqres()
else:
seq = c.getSequence()
fasta_output.write("%s/" % seq)
fasta_output.write("*\n")
else:
fasta_output.write(">%s chain %s\n" % (os.path.basename(pdbFile), chain))
c = m.getChain(chain)
if seqres:
fasta_output.write("%s\n" % c.getSeqres())
else:
fasta_output.write("%s\n" % c.getSequence())
def test():
p = PDBParser(open("/lab/db/pdb/hash/oc/pdb1oca.ent")).parse()
p.formatPDB(sys.stdout, None, 1)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""Demo of the most fundamental Tkinter and Pmw widgets"""
from Tkinter import *
import tkMessageBox, tkFileDialog, tkColorChooser
import Pmw
import string, sys, os
class InputFields:
"""
Demonstrate typical widgets for input data: entry, slider,
option menu, checkbutton, radiobuttons.
"""
def __init__(self,
parent,
status_line,
balloon=None,
scrolled=True):
"""
Create widgets.
parent parent widget
status_line Label with status of user actions
balloon balloon help widget
scrolled scrolled main frame or not
"""
self.master = parent
self.status_line = status_line
self.balloon = balloon
if scrolled:
# use an intelligent Pmw.ScrolledFrame widget to hold the
# whole window; scrollbars are automatically added if necessary
self.topframe = Pmw.ScrolledFrame(self.master,
usehullsize=1, hull_height=210, hull_width=340)
# (just Pmw.ScrolledFrame(self.master) gives a fixed-size
# domain with scrollbars; height/width should be adjusted here;
# pack self.topframe in a separate function)
# create all other widgets inside the top frame:
self.create(self.topframe.interior())
# or: self.create(self.topframe.component('frame'))
else:
# use a standard Tkinter Frame with adaptive size:
self.topframe = Frame(self.master, borderwidth=2, relief='groove')
self.topframe.pack(expand=True, fill='both')
# create all other widgets inside the top frame:
self.create(self.topframe)
def pack(self, **kwargs):
"""
Pack the topframe. The location of the InputFields GUI in
the parent widget can be controlled by the calling code.
"""
self.topframe.pack(kwargs, expand=True, fill='both')
def create(self, parent):
"""Create all widgets."""
# simple Pmw.EntryField:
self.case = StringVar() # tie a variable to an entry
self.case_widget = Pmw.EntryField(parent,
labelpos='w',
label_text='case name',
entry_width=15,
entry_textvariable=self.case,
command=self.status_entries)
# entry field with validate specification:
self.mass = DoubleVar(); self.mass.set(1.0)
self.mass_widget = Pmw.EntryField(parent,
labelpos='w', # n, nw, ne, e, and so on
label_text='mass',
validate={'validator': 'real', 'min': 0},
entry_width=15,
entry_textvariable=self.mass,
command=self.status_entries)
# skip tying the next entry to a variable, use value= and get()
self.damping_widget = Pmw.EntryField(parent,
labelpos='w',
label_text='damping',
validate={'validator': 'real', 'min': 0},
entry_width=15,
value=0.0,
command=self.status_entries)
# entry field with balloon help:
self.A = DoubleVar(); self.A.set(2.0)
self.A_widget = Pmw.EntryField(parent,
labelpos='w',
label_text='amplitude',
validate=None,
entry_width=15,
entry_textvariable=self.A,
command=self.status_entries)
try:
self.balloon.bind(self.A_widget,
'Pressing return updates the status line')
except AttributeError:
# balloon help is not available
pass
# slider:
self.y0 = DoubleVar(); self.y0.set(0.2)
self.y0_widget = Scale(parent,
orient='horizontal',
from_=0, to=2, # range of slider
tickinterval=0.5, # tickmarks on the slider "axis"
resolution=0.05, # the steps of the counter above the slider
label='initial value y(0)', # label printed above the slider
#font='helvetica 12 italic', # optional font
length=300, # length of slider in pixels
variable=self.y0, # value is tied to self.y0
command=self.status_slider)
# option menu:
self.func = StringVar(); self.func.set('y')
self.func_widget = Pmw.OptionMenu(parent,
labelpos='w', # n, nw, ne, e, and so on
label_text='spring',
items=['y', 'y3', 'siny'],
menubutton_textvariable=self.func,
menubutton_width=6,
command=self.status_option)
# checkbutton:
self.store_data = IntVar(); self.store_data.set(1)
self.store_data_widget = Checkbutton(parent,
text='store data',
variable=self.store_data,
command=self.status_checkbutton)
# pack all entries and option menus, and align them nicely:
widgets = (self.case_widget, self.mass_widget,
self.damping_widget, self.A_widget,
self.func_widget)
for w in widgets:
w.pack(side='top', pady=3, anchor='w')
Pmw.alignlabels(widgets)
# pack slider and checkbutton at the end (these are
# adjusted to the left):
self.y0_widget.pack(side='top', anchor='w', pady=4)
self.store_data_widget.pack(side='top', anchor='w', pady=3)
def status_entries(self):
"""Read values from entry widgets or variables tied to them."""
s = "entry fields: '" + self.case.get() + \
"', " + str(self.mass.get()) + \
", " + self.damping_widget.get() + \
", " + str(self.A.get())
self.status_line.configure(text=s)
def status_slider(self, value):
self.status_line.configure(text='slider value: ' + \
str(self.y0.get()))
# or
self.status_line.configure(text='slider value: ' + value)
def status_option(self, value):
self.status_line.configure(text=self.func.get())
# or
self.status_line.configure(text=value)
def status_checkbutton(self):
self.status_line.configure(text='store data checkbutton: ' + \
str(self.store_data.get()))
class InputLists:
"""
Demonstrate various widgets that let the user choose
items from some kind of list:
standard listbox, combo boxes, radio buttons, collection of
checkbuttons, option menu.
"""
def __init__(self, parent, status_line, balloon=None):
self.master = parent
self.status_line = status_line
self.balloon = balloon
self.frame = Frame(self.master, borderwidth=3)
# pack self.frame in a separate function
self.create(self.frame)
def pack(self, **kwargs):
self.frame.pack(kwargs)
def create(self, parent):
header = Label(parent, text='Widgets for list data',
font='courier 14 bold', foreground='blue',
background='#%02x%02x%02x' % (196,196,196))
header.pack(side='top', pady=10, ipady=10, fill='x')
# frame for left-to-right packing of single-selection
# list-like widgets:
frame = Frame(parent); frame.pack(side='top')
# the various widgets are aligned with a common top line,
# obtained by anchor='n'
# create list:
listitems = ['list item ' + str(i+1) for i in range(40)]
# standard listbox:
self.list1 = Pmw.ScrolledListBox(frame,
listbox_selectmode='single', # 'multiple'
vscrollmode='static', hscrollmode='dynamic',
listbox_width=12, listbox_height=6,
label_text='plain listbox\nsingle selection',
# labelpos is needed if label_text is present,
# choices: n (north), nw (north-west), s (south) ...
labelpos='n',
selectioncommand=self.status_list1)
self.list1.pack(side='left', padx=10, anchor='n')
# insert items:
for item in listitems:
self.list1.insert('end', item) # insert after end of list
# could also say
# self.list.configure(items=listitems)
# or give the items value as keyword 'items='
# at construction time
# example on updating an option in, e.g., the Tkinter
# Listbox part of a Pmw.ScrolledListBox:
# self.list.component('listbox').configure(background='blue')
# i.e. the parts (listbox, label, etc) are ordinary Tkinter
# widgets that can be extracted by the component method
# simple combo box with list and entry for chosen item:
self.combo1 = Pmw.ComboBox(frame,
label_text='simple combo box',
labelpos='nw',
scrolledlist_items=listitems,
selectioncommand=self.status_combobox,
listbox_height=6,
dropdown=False)
self.combo1.pack(side='left', padx=10, anchor='n')
# dropdown combo box with entry for chosen item and
# button for showing the list:
self.combo2 = Pmw.ComboBox(frame,
label_text='dropdown combo box',
labelpos='nw',
scrolledlist_items=listitems,
selectioncommand=self.status_combobox,
listbox_height=6,
dropdown=True) # the only difference from combo1
self.combo2.pack(side='left', padx=10, anchor='n')
if self.balloon is not None:
self.balloon.bind(self.combo2, 'Click on arrow to display list')
frame_left = Frame(parent); frame_left.pack(side='left')
# standard listbox:
self.list2 = Pmw.ScrolledListBox(frame_left,
listbox_selectmode='multiple',
vscrollmode='static', hscrollmode='dynamic',
listbox_width=12, listbox_height=6,
label_text='plain listbox\nmultiple selection',
labelpos='n',
items=listitems,
selectioncommand=self.status_list2)
self.list2.pack(side='left', anchor='n')
# frame_right holds other widgets packed top-bottom:
frame_right = Frame(parent); frame_right.pack(side='left')
# option menu:
self.option_var = StringVar(); self.option_var.set('item2')
self.option1 = Pmw.OptionMenu(frame_right,
labelpos='w', # n, nw, ne, e, and so on
label_text='Option Menu:',
items=['item1', 'item2', 'item3', 'item4'],
menubutton_textvariable=self.option_var,
menubutton_width=6,
command=self.status_option)
self.option1.pack(side='top', anchor='w')
# plain Tk radio buttons, tied to a variable:
self.radio_var = StringVar() # common variable for radio buttons
self.radio1 = Frame(frame_right)
self.radio1.pack(side='top', pady=5)
Label(self.radio1,
text='Tk radio buttons').pack(side='left')
for radio in ('radio1', 'radio2', 'radio3', 'radio4'):
r = Radiobutton(self.radio1, text=radio, variable=self.radio_var,
value='radiobutton no. ' + radio[5],
command=self.status_radio1)
r.pack(side='left')
# Pmw radio buttons
self.radio2 = Pmw.RadioSelect(frame_right,
selectmode='single',
buttontype='radiobutton', # 'button': plain button layout
labelpos='w',
label_text='Pmw radio buttons\nsingle selection',
orient='horizontal',
frame_relief='ridge', # try some decoration...
command=self.status_radio2)
self.radio2.pack(side='top', padx=10, anchor='w')
# add items; radio buttons are only feasible for a few items:
for text in ('item1', 'item2', 'item3', 'item4'):
self.radio2.add(text)
self.radio2.invoke('item2') # 'item2' is pressed by default
# check button list:
self.radio3 = Pmw.RadioSelect(frame_right,
selectmode='multiple',
buttontype='checkbutton',
labelpos='w',
label_text='Pmw check buttons\nmultiple selection',
orient='horizontal',
frame_relief='ridge', # try some decoration...
command=self.status_radio3)
self.radio3.pack(side='top', padx=10, anchor='w')
# add items; radio buttons are only feasible for a few items:
for text in ('item1', 'item2', 'item3', 'item4'):
self.radio3.add(text)
# press 'item2' and 'item4' by default:
self.radio3.invoke('item2'); self.radio3.invoke('item4')
def status_list1(self):
"""Extract single list selection."""
selected_item = self.list1.getcurselection()[0]
selected_index = self.list1.curselection()[0]
text = 'selected list item=' + str(selected_item) + \
', index=' + str(selected_index)
self.status_line.configure(text=text)
def status_list2(self):
"""Extract multiple list selections."""
selected_items = self.list2.getcurselection() # tuple
selected_indices = self.list2.curselection() # tuple
text = 'list items=' + str(selected_items) + \
', indices=' + str(selected_indices)
self.status_line.configure(text=text)
def status_combobox(self, value):
text = 'combo box value = ' + str(value)
self.status_line.configure(text=text)
def status_radio1(self):
text = 'radiobutton variable = ' + self.radio_var.get()
self.status_line.configure(text=text)
def status_radio2(self, value):
text = 'Pmw radio buttons: ' + value
self.status_line.configure(text=text)
def status_radio3(self, button_name, pressed):
if pressed: action = 'pressed'
else: action = 'released'
text = 'Pmw check button ' + button_name + ' was ' + \
action + '; pressed buttons: ' + \
str(self.radio3.getcurselection())
self.status_line.configure(text=text)
def status_option(self, value):
self.status_line.configure(text='option menu = ' + value)
# or, since self.option_var is tied to the option menu,
# ...configure(text='option menu ' + self.option_var.get())
class TkinterPmwDemo:
def __init__(self, parent):
self.master = parent
self.balloon = Pmw.Balloon(self.master) # used for all balloon helps
# write messages about window actions in a common status label:
frame = Frame(self.master)
# pack frame with status label at the bottom:
frame.pack(side='bottom', anchor='w', fill='x', expand=True)
#Label(frame, text='Widget action response: ',
# font='helvetica 8', anchor='w').pack(side='left')
self.status_line = Label(frame, relief='groove', #relief='sunken',
font='helvetica 8', anchor='w')
# configure text later
self.status_line.pack(side='left', fill='x', expand=True)
self.pulldown_menus(self.master)
fields = InputFields(self.master, self.status_line,
balloon=self.balloon, scrolled=False)
fields.pack(side='top',padx=30,pady=20)
Button(self.master, text='Display widgets for list data',
command=self.list_dialog, width=29).pack(pady=2)
Button(self.master, text='Display the source code',
command=self.display_code, width=29).pack(pady=2)
# type q to quit:
self.master.bind('<q>', self.quit) # self.quit needs an event argument
def display_code(self):
self.display_file(sys.argv[0], self.master)
def pulldown_menus(self, parent):
self.menu_bar = Pmw.MenuBar(parent,
hull_relief='raised',
hull_borderwidth=1,
balloon=self.balloon,
hotkeys=True, # define accelerators
)
self.menu_bar.pack(fill='x')
self.menu_bar.addmenu('File', None, tearoff=True)
self.menu_bar.addmenuitem('File', 'command',
statusHelp='Open a file',
label='Open...',
command=self.file_read)
self.menu_bar.addmenuitem('File', 'command',
statusHelp='Save a file',
label='Save as...',
command=self.file_save)
self.menu_bar.addmenuitem('File', 'command',
statusHelp='Exit this application',
label='Quit',
command=self.quit)
self.menu_bar.addmenu('Dialogs',
'Demonstrate various Tk/Pmw dialog boxes', # balloon help
tearoff=True)
self.menu_bar.addmenuitem('Dialogs', 'command',
label='Tk confirmation dialog',
command=self.confirmation_dialog)
self.menu_bar.addmenuitem('Dialogs', 'command',
label='Tk message dialog',
command=self.Tk_message_dialog)
self.menu_bar.addmenuitem('Dialogs', 'command',
label='Pmw message dialog',
command=self.Pmw_message_dialog)
self.menu_bar.addmenuitem('Dialogs', 'command',
label='Pmw user-defined dialog',
command=self.userdef_dialog)
self.menu_bar.addcascademenu('Dialogs', 'Color dialogs',
statusHelp='Exemplify different color dialogs')
self.menu_bar.addmenuitem('Color dialogs', 'command',
label='Tk Color Dialog',
command=self.tk_color_dialog)
self.menu_bar.addmenuitem('Color dialogs', 'command',
label='Pynche color dialog',
command=self.pynche_color_dialog)
self.menu_bar.addmenu('Demo',
'Demonstrate various widgets and effects',
tearoff=True)
self.menu_bar.addmenuitem('Demo', 'command',
label='List data',
command=self.list_dialog)
self.menu_bar.addmenuitem('Demo', 'command',
label='Relief/borderwidth',
command=self.relief_dialog)
self.menu_bar.addmenuitem('Demo', 'command',
label='Bitmaps',
command=self.bitmap_dialog)
self.menu_bar.addmenu('Help', None, side='right')
self.menu_bar.addmenuitem('Help', 'command',
label='Tutorial',
command=self.tutorial)
self.balloon_on = IntVar(); self.balloon_on.set(1)
self.menu_bar.addmenuitem('Help', 'checkbutton',
label='Balloon help',
variable=self.balloon_on,
command=self.toggle_balloon)
def confirmation_dialog(self):
message = 'This is a demo of a Tk confirmation dialog box'
ok = tkMessageBox.askokcancel('OK', message)
if ok:
self.status_line.configure(text="'OK' was pressed")
else:
self.status_line.configure(text="'Cancel' was pressed")
def Tk_message_dialog(self):
message = 'This is a demo of a Tk message dialog box'
answer = tkMessageBox.Message(icon='info', type='ok',
message=message, title='About').show()
self.status_line.configure(text="'%s' was pressed" % answer)
def Pmw_message_dialog(self):
# message is typeset as a label so we need explicit newlines:
message = """\
This is a demo of the Pmw.MessageDialog box,
which is useful for writing longer text messages
to the user."""
Pmw.MessageDialog(self.master, title='Description',
buttons=('Quit',), message_text=message,
message_justify='left',
message_font='helvetica 12',
icon_bitmap='info',
# must be present if icon_bitmap is:
iconpos='w')
def userdef_dialog(self):
self.userdef_d = Pmw.Dialog(self.master,
title='Programmer-Defined Dialog',
buttons=('Apply', 'Cancel'),
#defaultbutton='Apply',
command=self.userdef_dialog_action)
self.userdef_d_gui = InputFields(self.userdef_d.interior(),
self.status_line,
self.balloon, scrolled=True)
self.userdef_d_gui.pack()
def userdef_dialog_action(self, result):
# result contains the name of the button that we clicked
if result == 'Apply':
# example on extracting dialog variables:
case = self.userdef_d_gui.case.get()
# (changing variables in self.gui are reflected in
# the self.status_line)
else:
text = 'you just canceled the dialog'
self.status_line.configure(text=text)
# does not work: self.dialog.deactivate(result)
self.userdef_d.destroy() # destroy dialog window
def file_read(self):
fname = tkFileDialog.Open(filetypes=[('anyfile','*')]).show()
text = 'chosen file to open: ' + os.path.basename(fname)
self.status_line.configure(text=text)
# the dialog checks the validity of the filename, but
# pressing Cancel results in an empty return string
if fname:
self.display_file(fname, self.master)
def display_file(self, filename, parent):
"""Read file into a text widget in a _separate_ window."""
filewindow = Toplevel(parent) # new window
f = open(filename, 'r'); filestr = f.read(); f.close()
# determine the number of lines and the max linewidth:
lines = filestr.split('\n')
nlines = len(lines)
maxwidth = max(map(lambda line: len(line), lines))
filetext = Pmw.ScrolledText(filewindow,
borderframe=5, # a bit space around the text
vscrollmode='dynamic', hscrollmode='dynamic',
labelpos='n', label_text='Contents of file '+filename,
text_width=min(80,maxwidth),
text_height=min(50,nlines),
text_wrap='none', # do not break lines
)
filetext.pack(expand=True, fill='both')
filetext.insert('end', filestr)
# add a quit button:
Button(filewindow, text='Quit',
command=filewindow.destroy).pack(pady=10)
# force the new window to be in focus:
filewindow.focus_set()
def file_save(self):
fname = tkFileDialog.SaveAs(
filetypes=[('temporary files','*.tmp')],
initialfile='myfile.tmp',
title='Save a file').show()
text = 'chosen file to save: "' + os.path.basename(fname) + '"'
self.status_line.configure(text=text)
def quit(self, event=None):
self.master.destroy()
def tk_color_dialog(self):
# see python src, subdirectory Lib/lib-tk
# and the tkColorChooser.py file
color = tkColorChooser.Chooser(
initialcolor='gray',title='Choose background color').show()
# or:
# color = tkColorChooser.askcolor()
# color[0] is now an (r,g,b) tuple and
# color[1] is a hexadecimal number; send the latter to
# tk_setPalette to change the background color:
# (when Cancel is pressed, color is (None,None))
if color[0] is not None:
self.master.tk_setPalette(color[1])
text = 'new background color is ' + str(color[0]) + \
' (rgb) or ' + str(color[1])
self.status_line.configure(text=text)
def pynche_color_dialog(self):
#from pynche import pyColorChooser
#color = pyColorChooser.askcolor(parent=self.master)
# or
import pynche.pyColorChooser
color = pynche.pyColorChooser.askcolor(self.master)
try:
self.master.tk_setPalette(color[1])
text = 'new background color is ' + str(color[0]) + \
' (rgb) or ' + color[1]
self.status_line.configure(text=text)
except: pass
def list_dialog(self):
self.list_d = Pmw.Dialog(self.master,
title='Demo of widgets for list data',
buttons=('Quit',), defaultbutton='Quit')
lists = InputLists(self.list_d.interior(), self.status_line,
balloon=self.balloon)
lists.pack(side='left')
def relief_dialog(self):
self.relief_d = Pmw.Dialog(self.master,
title='Demo of relief and borderwidth',
buttons=('Quit',), # (default)
defaultbutton='Quit')
self.reliefs_borderwidth(self.relief_d.interior())
def reliefs_borderwidth(self, parent):
# use a frame to align examples on various relief values:
frame = Frame(parent); frame.pack(side='top',pady=15)
# will use the grid geometry manager to pack widgets in this frame
reliefs = ('groove', 'raised', 'ridge', 'sunken', 'flat')
row = 0
for borderwidth in (0,2,4,6):
label = Label(frame, text='reliefs with borderwidth=%d: ' \
% borderwidth)
label.grid(row=row, column=0, sticky='w', pady=5)
for i in range(len(reliefs)):
l = Label(frame, text=reliefs[i], relief=reliefs[i],
borderwidth=borderwidth)
l.grid(row=row, column=i+1, padx=5, pady=5)
row += 1
def bitmap_dialog(self):
self.bitmap_d = Pmw.Dialog(self.master,
title='Demo of predefined bitmaps',
buttons=('Quit',),
defaultbutton='Quit',
command=self.bitmap_dialog_action)
self.bitmap_demo(self.bitmap_d.interior())
def bitmap_demo(self, parent):
# predefined bitmaps:
bitmaps = ('error', 'gray25', 'gray50', 'hourglass',
'info', 'questhead', 'question', 'warning')
Label(parent, text="""\
Predefined bitmaps, which can be used to
label dialogs (questions, info, etc.)""",
foreground='red').pack()
frame = Frame(parent); frame.pack(side='top', pady=5)
for i in range(len(bitmaps)): # write name of bitmaps
Label(frame, text=bitmaps[i]).grid(row=0, column=i+1)
for i in range(len(bitmaps)): # insert bitmaps
Label(frame, bitmap=bitmaps[i]).grid(row=1, column=i+1)
def bitmap_dialog_action(self, result):
# result contains the name of the button that we clicked
if result == 'Quit':
if tkMessageBox.askyesno('Yes', 'Are you sure you want to quit?'):
self.bitmap_d.destroy()
def tutorial(self):
self.tutorial_d = Pmw.Dialog(self.master,
title='Short explanation of this application',
buttons=('Quit',),
defaultbutton='Quit')
text = """\
This application demonstrates many of the most common widgets in
graphical user interfaces (with the exception of the canvas widget).
The typical usage is to (i) launch
%s
(ii) find the desired widget, (iii) look up in the source code to
find the basic construction statements, (iv) try this in your own
GUI, (v) look up man page or textbook information to fine tune the
widget settings. In this way, the application might act as an
example-oriented quick reference.
Try these actions to test the widgets in the application:
1. Change one of the entry fields ("case name", "mass", etc.),
and press return to see the status field at the bottom of
the window being updated.
In the source code you can see how to call a function each
time the contents of a widget change. Such a function
also demonstrates how to extract the contents of a widget.
2. Choose an item from the option menu and watch the status line
at the bottom of the window.
3. Drag the slider.
4. Push the "Display widgets for list data" button to see
a collection of widgets for list data.
5. Visit the File, Dialogs, and Demo pull-down menus and try out
the various submenus.
""" % sys.argv[0]
# determine the number of lines and the max linewidth:
lines = text.split('\n'); nlines = len(lines)
maxwidth = max(map(lambda line: len(line), lines))
help = Pmw.ScrolledText(self.tutorial_d.interior(),
borderframe=5, # a bit space around the text
vscrollmode='dynamic', hscrollmode='dynamic',
labelpos='n',
label_text='How to make use of this application',
text_width=min(80,maxwidth), text_height=min(50,nlines),
text_wrap='none')
help.pack()
help.insert('end', text)
def toggle_balloon(self):
if self.balloon_on.get():
self.balloon.configure(state='both') # on
else:
self.balloon.configure(state='none') # off
# this one is not active; provides just an example:
def Tk_pulldown(self, parent):
"""
Demonstrate how to create a menu bar with basic Tk
components. This is a lower-level alternative to
Pmw.MenuBar.
"""
# pull-down menu:
self.pulldown = Menubutton(parent, text='Pulldown Menu',
relief='groove', underline=False)
self.pulldown.pack(side='left',padx=10)
# add entries in the 'Pulldown Menu' menu:
self.pulldown_menu = Menu(self.pulldown, tearoff=True)
# first menu entry:
self.pulldown_menu.add_command(label='Tk Confirmation Dialog',
command=self.confirmation_dialog, underline=0, accelerator='Alt+C')
self.pulldown_menu.add_command(label='Tk Message Dialog',
command=self.Tk_message_dialog, underline=0, accelerator='Alt+T')
self.pulldown_menu.add_command(label='Pmw Message Dialog',
command=self.Pmw_message_dialog, underline=4, accelerator='Alt+M')
self.pulldown_menu.add_command(label='Pmw User-Defined Dialog',
command=self.userdef_dialog, underline=4, accelerator='Alt+U')
# add cascading menu, here an entry "File Dialogs"
# with two submenus, "Open" and "Save As":
self.file_menu = Menu(self.pulldown_menu, tearoff=True)
self.pulldown_menu.add_cascade(label='File Dialogs',
menu=self.file_menu, underline=0)
self.file_menu.add_command(label='Open',
command=self.file_read)
self.file_menu.add_command(label='Save As',
command=self.file_save)
# continue with main menu:
self.pulldown_menu.add('separator') # horizontal line
self.pulldown_menu.add_command(label='Tk Color Dialog',
command=self.tk_color_dialog)
self.pulldown_menu.add_command(label='Pynche Color Dialog',
command=self.pynche_color_dialog)
# set up a pointer from the menubutton back to the menu:
# (required for the pull-down menu to be displayed!)
self.pulldown['menu'] = self.pulldown_menu
# could be implemented:
# bind double-click to any widget to popping up a dialogbox with
# all configure options (cf. scrolledframe demo) in EntryFields
# with apply and quit buttons (apply: run configure and watch the effect)
def create_lists():
"""Launch a GUI consisting of class InputLists only."""
root = Tk()
Pmw.initialise(root)
status = Label(root)
widget = InputLists(root, status)
widget.pack()
status.pack() # get the status line below the widgets
root.mainloop()
def create_fields():
"""Launch a GUI consisting of class InputFields only."""
root = Tk()
Pmw.initialise(root)
status = Label(root)
widget = InputFields(root, status)
widget.pack()
status.pack() # get the status line below the widgets
root.mainloop()
def create_all():
"""Create the complete TkinterPmwDemo."""
root = Tk()
Pmw.initialise(root)
root.title('Tkinter/Pmw Demo')
# tk_strictMotif changes the file dialog in Tk's OpenFile etc.
#root.tk_strictMotif(1)
#Pmw.initialise(root,fontScheme='pmw1')
#import scitools.misc; scitools.misc.fontscheme1(root)
widget = TkinterPmwDemo(root)
# this widget packs itself...
root.mainloop()
# create demo in root window for testing
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'lists':
create_lists()
elif len(sys.argv) > 1 and sys.argv[1] == 'fields':
create_fields()
else:
create_all()
|
|
import time
import datetime
import re
import urllib
import subprocess
import os
import urlparse
import random
import pytz
import requests
from slugify import slugify
from django.core.handlers.wsgi import WSGIRequest
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import RequestSite, Site
from django.core.mail.backends.filebased import EmailBackend
from django.forms.utils import ErrorList
from django.contrib.staticfiles.storage import staticfiles_storage
from airmozilla.base.akamai_token_v2 import AkamaiToken
def roughly(number, variance_percentage=20):
"""return a number that is roughly what you inputted but
slightly smaller or slightly bigger.
For example, if you feed it 100, return 96 or 117 or 91 or 102.
Basically, take or add a certain percentage to the number.
This is useful if you put a lot of items in the cache and don't
want them all to expire at the same time, but would rather stagger
the expiration times a bit.
"""
percentage = random.randint(-variance_percentage, variance_percentage)
return int(number * (1 + percentage / 100.0))
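# Example (hypothetical usage, not part of this module): stagger cache
# expiration times so entries do not all expire at once.
#
# from django.core.cache import cache
# cache.set('key', value, roughly(60 * 60))  # ~1 hour, +/- 20%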
def simplify_form_errors(errors):
copy = {}
for key, value in errors.items():
if isinstance(value, ErrorList):
value = list(value)
copy[key] = value
return copy
class EmlEmailBackend(EmailBackend):
"""
The django.core.mail.backends.filebased.EmailBackend backend
is neat but it creates the files as .log.
This makes it not possible to open the files in Postbox until
you rename them.
To use this, put this in your settings/local.py::
EMAIL_BACKEND = 'airmozilla.base.utils.EmlEmailBackend'
EMAIL_FILE_PATH = '/Users/peterbe/tmp/captured-emails/'
"""
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.eml" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
class EdgecastEncryptionError(Exception):
pass
def shorten_url(url):
"""return the URL shortened with Bit.ly"""
if not settings.BITLY_ACCESS_TOKEN:
raise ImproperlyConfigured('BITLY_ACCESS_TOKEN not set')
response = requests.get(
settings.BITLY_URL,
params={
'access_token': settings.BITLY_ACCESS_TOKEN,
'longUrl': url
}
)
result = response.json()
if result.get('status_code') == 500:
raise ValueError(result.get('status_txt'))
return result['data']['url']
def unique_slugify(data, models, duplicate_key='', lower=True, exclude=None):
"""Returns a unique slug string. If duplicate_key is provided, this is
appended for non-unique slugs before adding a count."""
slug_base = slugify(data)
if lower:
slug_base = slug_base.lower()
counter = 0
slug = slug_base
def query(model):
qs = model.objects.filter(slug=slug)
if exclude:
qs = qs.exclude(**exclude)
return qs
while any(query(model).exists() for model in models):
counter += 1
if counter == 1 and duplicate_key:
slug_base += '-' + duplicate_key
slug = slug_base
continue
slug = "%s-%i" % (slug_base, counter)
return slug
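# Illustrative sketch (the model names are hypothetical): on the first
# collision the duplicate_key is appended, after that a counter.
#
# slug = unique_slugify('My Title', [Event, SuggestedEvent],
#                       duplicate_key='2015')
# # 'my-title', then 'my-title-2015', then 'my-title-2015-2', ...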
def tz_apply(dt, tz):
"""Returns a datetime with tz applied, timezone-aware.
Strips the Django-inserted timezone from settings.TIME_ZONE."""
dt = dt.replace(tzinfo=None)
return tz.normalize(tz.localize(dt))
def paginate(objects, page, count):
"""Returns a set of paginated objects, count per page (on #page)"""
__, objects_paged = paginator(objects, page, count)
return objects_paged
def paginator(objects, page, count):
"""return a Paginator instance and the objects paged"""
paginator_ = Paginator(objects, count)
try:
objects_paged = paginator_.page(page)
except PageNotAnInteger:
objects_paged = paginator_.page(1)
except EmptyPage:
objects_paged = paginator_.page(paginator_.num_pages)
return paginator_, objects_paged
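# Example (Event is a hypothetical model; the page number typically
# comes from request.GET):
#
#     paginator_, events_paged = paginator(Event.objects.all(), 2, 10)
#     # events_paged holds items 11-20; out-of-range or non-integer
#     # page numbers fall back to the last or first page respectively.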
def unhtml(text_with_html):
return re.sub('<.*?>', '', text_with_html)
def edgecast_tokenize(seconds=None, **kwargs):
if not settings.EDGECAST_SECURE_KEY: # pragma: no cover
raise ImproperlyConfigured(
"'EDGECAST_SECURE_KEY' not set up"
)
if seconds:
expires = (
datetime.datetime.utcnow() +
datetime.timedelta(seconds=seconds)
)
        # EdgeCast unfortunately bases its `ec_expire` timestamps on
        # local time rather than UTC.
        # So you have to subtract 8 hours (or 7, depending on the
        # season) to get a timestamp that works.
tz = pytz.timezone('America/Los_Angeles')
expires += tz.utcoffset(expires)
expires_timestamp = time.mktime(expires.timetuple())
kwargs['ec_expire'] = int(expires_timestamp)
key = settings.EDGECAST_SECURE_KEY
binary_location = getattr(
settings,
'BINARY_LOCATION',
'ec_encrypt'
)
command = [
binary_location,
key,
urllib.urlencode(kwargs)
]
out, err = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
if not out and err:
raise EdgecastEncryptionError(err)
return out.strip()
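# Hedged example (requires EDGECAST_SECURE_KEY in settings and the
# `ec_encrypt` binary; `ec_clientip` is an EdgeCast parameter, not
# something defined in this module):
#
#     token = edgecast_tokenize(seconds=3600, ec_clientip='192.0.2.1')
#     secure_url = '%s?%s' % (video_url, token)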
def akamai_tokenize(
window_seconds=180,
token_name='hdnea',
start_time='now',
ip=None,
url=None,
access_list='/*/*_Restricted/*',
key=None,
escape_early=False,
verbose=False,
**other
):
config = other
config['key'] = key or settings.AKAMAI_SECURE_KEY
assert config['key'], "no key set up"
config['window_seconds'] = window_seconds
config['start_time'] = start_time
config['token_name'] = token_name
config['ip'] = ip
config['acl'] = access_list
config['verbose'] = verbose
config['escape_early'] = escape_early
generator = AkamaiToken(**config)
token = generator.generateToken()
return token
def fix_base_url(base_url):
"""because most of the functions in this file can take either a
base_url (string) or a request, we make this easy with a quick
fixing function."""
if isinstance(base_url, WSGIRequest):
request = base_url
protocol = 'https' if request.is_secure() else 'http'
base_url = '%s://%s' % (protocol, RequestSite(request).domain)
return base_url
class _DotDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self._recurse(self)
def _recurse(self, item):
for key, value in item.iteritems():
if isinstance(value, dict):
item[key] = _DotDict(value)
def __getattr__(self, key):
if key.startswith('__'):
raise AttributeError(key)
return self[key]
def dot_dict(d):
return _DotDict(d)
def get_abs_static(path, request):
path = staticfiles_storage.url(path)
prefix = request.is_secure() and 'https' or 'http'
if path.startswith('/') and not path.startswith('//'):
# e.g. '/media/foo.png'
root_url = get_base_url(request)
path = root_url + path
if path.startswith('//'):
path = '%s:%s' % (prefix, path)
assert path.startswith('http://') or path.startswith('https://')
return path
def get_base_url(request):
return (
'%s://%s' % (
request.is_secure() and 'https' or 'http',
RequestSite(request).domain
)
)
def prepare_vidly_video_url(url):
"""Return the URL prepared for Vid.ly
    See http://help.encoding.com/knowledge-base/article/\
save-some-time-on-your-encodes/
Hopefully this will make the transcoding faster.
"""
if 's3.amazonaws.com' in url:
if '?' in url:
url += '&'
else:
url += '?'
url += 'nocopy'
return url
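# Example:
#
#     prepare_vidly_video_url('https://s3.amazonaws.com/bucket/file.mp4')
#     # -> 'https://s3.amazonaws.com/bucket/file.mp4?nocopy'
#     prepare_vidly_video_url('https://example.com/file.mp4')
#     # -> unchanged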
def build_absolute_url(uri):
site = Site.objects.get_current()
base = 'https://%s' % site.domain # yuck!
return urlparse.urljoin(base, uri)
|
|
#!/usr/bin/env python
__description__ = 'Tool to test a PDF file'
__author__ = 'Didier Stevens'
__version__ = '0.2.4'
__date__ = '2018/01/29'
"""
Tool to test a PDF file
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2009/03/27: start
2009/03/28: scan option
2009/03/29: V0.0.2: xml output
2009/03/31: V0.0.3: /ObjStm suggested by Dion
2009/04/02: V0.0.4: added ErrorMessage
2009/04/20: V0.0.5: added Dates
2009/04/21: V0.0.6: added entropy
2009/04/22: added disarm
2009/04/29: finished disarm
2009/05/13: V0.0.7: added cPDFEOF
2009/07/24: V0.0.8: added /AcroForm and /RichMedia, simplified %PDF header regex, extra date format (without TZ)
2009/07/25: added input redirection, option --force
2009/10/13: V0.0.9: added detection for CVE-2009-3459; added /RichMedia to disarm
2010/01/11: V0.0.10: relaxed %PDF header checking
2010/04/28: V0.0.11: added /Launch
2010/09/21: V0.0.12: fixed cntCharsAfterLastEOF bug; fix by Russell Holloway
2011/12/29: updated for Python 3, added keyword /EmbeddedFile
2012/03/03: added PDFiD2JSON; coded by Brandon Dixon
2013/02/10: V0.1.0: added http/https support; added support for ZIP file with password 'infected'
2013/03/11: V0.1.1: fixes for Python 3
2013/03/13: V0.1.2: Added error handling for files; added /XFA
2013/11/01: V0.2.0: Added @file & plugins
2013/11/02: continue
2013/11/04: added options -c, -m, -v
2013/11/06: added option -S
2013/11/08: continue
2013/11/09: added option -o
2013/11/15: refactoring
2014/09/30: added CSV header
2014/10/16: V0.2.1: added output when plugin & file not pdf
2014/10/18: some fixes for Python 3
2015/08/12: V0.2.2: added option pluginoptions
2015/08/13: added plugin Instructions method
2016/04/12: added option literal
2017/10/29: added pdfid.ini support
2017/11/05: V0.2.3: added option -n
2018/01/03: V0.2.4: bugfix entropy calculation for PDFs without streams; sample 28cb208d976466b295ee879d2d233c8a https://twitter.com/DubinRan/status/947783629123416069
2018/01/15: bugfix ConfigParser privately reported
2018/01/29: bugfix oPDFEOF.cntCharsAfterLastEOF when no %%EOF
Todo:
- update XML example (entropy, EOF)
- code review, cleanup
"""
import collections
import glob
import json
import math
import operator
import optparse
import os
import os.path
import re
import sys
import traceback
import xml.dom.minidom
import zipfile
if sys.version_info[0] >= 3:
import urllib.request as urllib23
else:
import urllib2 as urllib23
if sys.version_info[0] >= 3:
import configparser as ConfigParser
else:
import ConfigParser
#Convert 2 Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
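# Example: C2BIP3('%PDF') returns b'%PDF' (a bytes object) on Python 3
# and the string '%PDF' unchanged on Python 2, so callers can write the
# same value to binary files under both interpreters.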
class cBinaryFile:
def __init__(self, file):
self.file = file
if file == '':
self.infile = sys.stdin
elif file.lower().startswith('http://') or file.lower().startswith('https://'):
try:
if sys.hexversion >= 0x020601F0:
self.infile = urllib23.urlopen(file, timeout=5)
else:
self.infile = urllib23.urlopen(file)
except urllib23.HTTPError:
print('Error accessing URL %s' % file)
print(sys.exc_info()[1])
sys.exit()
elif file.lower().endswith('.zip'):
try:
self.zipfile = zipfile.ZipFile(file, 'r')
self.infile = self.zipfile.open(self.zipfile.infolist()[0], 'r', C2BIP3('infected'))
except:
print('Error opening file %s' % file)
print(sys.exc_info()[1])
sys.exit()
else:
try:
self.infile = open(file, 'rb')
except:
print('Error opening file %s' % file)
print(sys.exc_info()[1])
sys.exit()
self.ungetted = []
def byte(self):
if len(self.ungetted) != 0:
return self.ungetted.pop()
inbyte = self.infile.read(1)
if not inbyte or inbyte == '':
self.infile.close()
return None
return ord(inbyte)
def bytes(self, size):
if size <= len(self.ungetted):
result = self.ungetted[0:size]
del self.ungetted[0:size]
return result
inbytes = self.infile.read(size - len(self.ungetted))
if inbytes == '':
self.infile.close()
if type(inbytes) == type(''):
result = self.ungetted + [ord(b) for b in inbytes]
else:
result = self.ungetted + [b for b in inbytes]
self.ungetted = []
return result
def unget(self, byte):
self.ungetted.append(byte)
def ungets(self, bytes):
bytes.reverse()
self.ungetted.extend(bytes)
class cPDFDate:
def __init__(self):
self.state = 0
def parse(self, char):
if char == 'D':
self.state = 1
return None
elif self.state == 1:
if char == ':':
self.state = 2
self.digits1 = ''
else:
self.state = 0
return None
elif self.state == 2:
if len(self.digits1) < 14:
if char >= '0' and char <= '9':
self.digits1 += char
return None
else:
self.state = 0
return None
elif char == '+' or char == '-' or char == 'Z':
self.state = 3
self.digits2 = ''
self.TZ = char
return None
elif char == '"':
self.state = 0
self.date = 'D:' + self.digits1
return self.date
elif char < '0' or char > '9':
self.state = 0
self.date = 'D:' + self.digits1
return self.date
else:
self.state = 0
return None
elif self.state == 3:
if len(self.digits2) < 2:
if char >= '0' and char <= '9':
self.digits2 += char
return None
else:
self.state = 0
return None
elif len(self.digits2) == 2:
if char == "'":
self.digits2 += char
return None
else:
self.state = 0
return None
elif len(self.digits2) < 5:
if char >= '0' and char <= '9':
self.digits2 += char
if len(self.digits2) == 5:
self.state = 0
self.date = 'D:' + self.digits1 + self.TZ + self.digits2
return self.date
else:
return None
else:
self.state = 0
return None
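# Example: feed the characters of a PDF date string one at a time;
# parse() returns None until a complete date has been recognized:
#
#     oPDFDate = cPDFDate()
#     for c in "D:20090128132916+01'00":
#         result = oPDFDate.parse(c)
#     # result == "D:20090128132916+01'00"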
def fEntropy(countByte, countTotal):
x = float(countByte) / countTotal
if x > 0:
return -x * math.log(x, 2)
else:
return 0.0
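# The Shannon entropy of a byte histogram is the sum of fEntropy over
# all 256 buckets, e.g. (data is any sequence of byte values):
#
#     buckets = [0] * 256
#     for b in data:
#         buckets[b] += 1
#     entropy = sum(fEntropy(count, len(data)) for count in buckets)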
class cEntropy:
def __init__(self):
self.allBucket = [0 for i in range(0, 256)]
self.streamBucket = [0 for i in range(0, 256)]
def add(self, byte, insideStream):
self.allBucket[byte] += 1
if insideStream:
self.streamBucket[byte] += 1
def removeInsideStream(self, byte):
if self.streamBucket[byte] > 0:
self.streamBucket[byte] -= 1
def calc(self):
        # list() so the result survives multiple iterations on Python 3
        self.nonStreamBucket = list(map(operator.sub, self.allBucket, self.streamBucket))
allCount = sum(self.allBucket)
streamCount = sum(self.streamBucket)
nonStreamCount = sum(self.nonStreamBucket)
if streamCount == 0:
return (allCount, sum(map(lambda x: fEntropy(x, allCount), self.allBucket)), streamCount, None,
nonStreamCount, sum(map(lambda x: fEntropy(x, nonStreamCount), self.nonStreamBucket)))
else:
return (allCount, sum(map(lambda x: fEntropy(x, allCount), self.allBucket)), streamCount,
sum(map(lambda x: fEntropy(x, streamCount), self.streamBucket)), nonStreamCount,
sum(map(lambda x: fEntropy(x, nonStreamCount), self.nonStreamBucket)))
class cPDFEOF:
def __init__(self):
self.token = ''
self.cntEOFs = 0
def parse(self, char):
if self.cntEOFs > 0:
self.cntCharsAfterLastEOF += 1
if self.token == '' and char == '%':
self.token += char
return
elif self.token == '%' and char == '%':
self.token += char
return
elif self.token == '%%' and char == 'E':
self.token += char
return
elif self.token == '%%E' and char == 'O':
self.token += char
return
elif self.token == '%%EO' and char == 'F':
self.token += char
return
elif self.token == '%%EOF' and (char == '\n' or char == '\r' or char == ' ' or char == '\t'):
self.cntEOFs += 1
self.cntCharsAfterLastEOF = 0
if char == '\n':
self.token = ''
else:
self.token += char
return
elif self.token == '%%EOF\r':
if char == '\n':
self.cntCharsAfterLastEOF = 0
self.token = ''
else:
self.token = ''
def FindPDFHeaderRelaxed(oBinaryFile):
bytes = oBinaryFile.bytes(1024)
index = ''.join([chr(byte) for byte in bytes]).find('%PDF')
if index == -1:
oBinaryFile.ungets(bytes)
return ([], None)
for endHeader in range(index + 4, index + 4 + 10):
if bytes[endHeader] == 10 or bytes[endHeader] == 13:
break
oBinaryFile.ungets(bytes[endHeader:])
return (bytes[0:endHeader], ''.join([chr(byte) for byte in bytes[index:endHeader]]))
def Hexcode2String(char):
if type(char) == int:
return '#%02x' % char
else:
return char
def SwapCase(char):
if type(char) == int:
return ord(chr(char).swapcase())
else:
return char.swapcase()
def HexcodeName2String(hexcodeName):
return ''.join(map(Hexcode2String, hexcodeName))
def SwapName(wordExact):
return map(SwapCase, wordExact)
def UpdateWords(word, wordExact, slash, words, hexcode, allNames, lastName, insideStream, oEntropy, fOut):
if word != '':
if slash + word in words:
words[slash + word][0] += 1
if hexcode:
words[slash + word][1] += 1
elif slash == '/' and allNames:
words[slash + word] = [1, 0]
if hexcode:
words[slash + word][1] += 1
if slash == '/':
lastName = slash + word
if slash == '':
if word == 'stream':
insideStream = True
if word == 'endstream':
if insideStream == True and oEntropy != None:
for char in 'endstream':
oEntropy.removeInsideStream(ord(char))
insideStream = False
if fOut != None:
if slash == '/' and '/' + word in ('/JS', '/JavaScript', '/AA', '/OpenAction', '/JBIG2Decode', '/RichMedia',
'/Launch'):
wordExactSwapped = HexcodeName2String(SwapName(wordExact))
fOut.write(C2BIP3(wordExactSwapped))
print('/%s -> /%s' % (HexcodeName2String(wordExact), wordExactSwapped))
else:
fOut.write(C2BIP3(HexcodeName2String(wordExact)))
return ('', [], False, lastName, insideStream)
class cCVE_2009_3459:
def __init__(self):
self.count = 0
def Check(self, lastName, word):
if (lastName == '/Colors' and word.isdigit() and
                int(word) > 2 ** 24):  # decided to alert when the number of colors is expressed with more than 3 bytes
self.count += 1
def XMLAddAttribute(xmlDoc, name, value=None):
att = xmlDoc.createAttribute(name)
xmlDoc.documentElement.setAttributeNode(att)
if value != None:
att.nodeValue = value
return att
def ParseINIFile():
oConfigParser = ConfigParser.ConfigParser(allow_no_value=True)
oConfigParser.optionxform = str
oConfigParser.read(os.path.join(os.path.dirname(sys.argv[0]), 'pdfid.ini'))
keywords = []
if oConfigParser.has_section('keywords'):
for key, value in oConfigParser.items('keywords'):
if not key in keywords:
keywords.append(key)
return keywords
def PDFiD(file, allNames=False, extraData=False, disarm=False, force=False):
"""Example of XML output:
<PDFiD ErrorOccured="False" ErrorMessage="" Filename="test.pdf" Header="%PDF-1.1" IsPDF="True" Version="0.0.4" Entropy="4.28">
<Keywords>
<Keyword Count="7" HexcodeCount="0" Name="obj"/>
<Keyword Count="7" HexcodeCount="0" Name="endobj"/>
<Keyword Count="1" HexcodeCount="0" Name="stream"/>
<Keyword Count="1" HexcodeCount="0" Name="endstream"/>
<Keyword Count="1" HexcodeCount="0" Name="xref"/>
<Keyword Count="1" HexcodeCount="0" Name="trailer"/>
<Keyword Count="1" HexcodeCount="0" Name="startxref"/>
<Keyword Count="1" HexcodeCount="0" Name="/Page"/>
<Keyword Count="0" HexcodeCount="0" Name="/Encrypt"/>
<Keyword Count="1" HexcodeCount="0" Name="/JS"/>
<Keyword Count="1" HexcodeCount="0" Name="/JavaScript"/>
<Keyword Count="0" HexcodeCount="0" Name="/AA"/>
<Keyword Count="1" HexcodeCount="0" Name="/OpenAction"/>
<Keyword Count="0" HexcodeCount="0" Name="/JBIG2Decode"/>
</Keywords>
<Dates>
<Date Value="D:20090128132916+01'00" Name="/ModDate"/>
</Dates>
</PDFiD>
"""
word = ''
wordExact = []
hexcode = False
lastName = ''
insideStream = False
keywords = [
'obj',
'endobj',
'stream',
'endstream',
'xref',
'trailer',
'startxref',
'/Page',
'/Encrypt',
'/ObjStm',
'/JS',
'/JavaScript',
'/AA',
'/OpenAction',
'/AcroForm',
'/JBIG2Decode',
'/RichMedia',
'/Launch',
'/EmbeddedFile',
'/XFA',
]
words = {}
dates = []
for extrakeyword in ParseINIFile():
if not extrakeyword in keywords:
keywords.append(extrakeyword)
for keyword in keywords:
words[keyword] = [0, 0]
slash = ''
xmlDoc = xml.dom.minidom.getDOMImplementation().createDocument(None, 'PDFiD', None)
XMLAddAttribute(xmlDoc, 'Version', __version__)
XMLAddAttribute(xmlDoc, 'Filename', file)
attErrorOccured = XMLAddAttribute(xmlDoc, 'ErrorOccured', 'False')
attErrorMessage = XMLAddAttribute(xmlDoc, 'ErrorMessage', '')
oPDFDate = None
oEntropy = None
oPDFEOF = None
oCVE_2009_3459 = cCVE_2009_3459()
try:
attIsPDF = xmlDoc.createAttribute('IsPDF')
xmlDoc.documentElement.setAttributeNode(attIsPDF)
oBinaryFile = cBinaryFile(file)
if extraData:
oPDFDate = cPDFDate()
oEntropy = cEntropy()
oPDFEOF = cPDFEOF()
(bytesHeader, pdfHeader) = FindPDFHeaderRelaxed(oBinaryFile)
if disarm:
(pathfile, extension) = os.path.splitext(file)
fOut = open(pathfile + '.disarmed' + extension, 'wb')
for byteHeader in bytesHeader:
fOut.write(C2BIP3(chr(byteHeader)))
else:
fOut = None
if oEntropy != None:
for byteHeader in bytesHeader:
oEntropy.add(byteHeader, insideStream)
if pdfHeader == None and not force:
attIsPDF.nodeValue = 'False'
return xmlDoc
else:
if pdfHeader == None:
attIsPDF.nodeValue = 'False'
pdfHeader = ''
else:
attIsPDF.nodeValue = 'True'
att = xmlDoc.createAttribute('Header')
att.nodeValue = repr(pdfHeader[0:10]).strip("'")
xmlDoc.documentElement.setAttributeNode(att)
byte = oBinaryFile.byte()
while byte != None:
char = chr(byte)
charUpper = char.upper()
if charUpper >= 'A' and charUpper <= 'Z' or charUpper >= '0' and charUpper <= '9':
word += char
wordExact.append(char)
elif slash == '/' and char == '#':
d1 = oBinaryFile.byte()
if d1 != None:
d2 = oBinaryFile.byte()
if d2 != None and (chr(d1) >= '0' and chr(d1) <= '9' or chr(d1).upper() >= 'A' and
chr(d1).upper() <= 'F') and (chr(d2) >= '0' and chr(d2) <= '9' or
chr(d2).upper() >= 'A' and chr(d2).upper() <= 'F'):
word += chr(int(chr(d1) + chr(d2), 16))
wordExact.append(int(chr(d1) + chr(d2), 16))
hexcode = True
if oEntropy != None:
oEntropy.add(d1, insideStream)
oEntropy.add(d2, insideStream)
if oPDFEOF != None:
oPDFEOF.parse(d1)
oPDFEOF.parse(d2)
else:
oBinaryFile.unget(d2)
oBinaryFile.unget(d1)
(word, wordExact, hexcode, lastName, insideStream) = UpdateWords(
word, wordExact, slash, words, hexcode, allNames, lastName, insideStream, oEntropy, fOut)
if disarm:
fOut.write(C2BIP3(char))
else:
oBinaryFile.unget(d1)
(word, wordExact, hexcode, lastName, insideStream) = UpdateWords(
word, wordExact, slash, words, hexcode, allNames, lastName, insideStream, oEntropy, fOut)
if disarm:
fOut.write(C2BIP3(char))
else:
oCVE_2009_3459.Check(lastName, word)
(word, wordExact, hexcode, lastName, insideStream) = UpdateWords(
word, wordExact, slash, words, hexcode, allNames, lastName, insideStream, oEntropy, fOut)
if char == '/':
slash = '/'
else:
slash = ''
if disarm:
fOut.write(C2BIP3(char))
if oPDFDate != None and oPDFDate.parse(char) != None:
dates.append([oPDFDate.date, lastName])
if oEntropy != None:
oEntropy.add(byte, insideStream)
if oPDFEOF != None:
oPDFEOF.parse(char)
byte = oBinaryFile.byte()
(word, wordExact, hexcode, lastName, insideStream) = UpdateWords(
word, wordExact, slash, words, hexcode, allNames, lastName, insideStream, oEntropy, fOut)
        # Check whether the file ended with %%EOF: if so, reset
        # cntCharsAfterLastEOF and add one to the EOF count. parse() can
        # never do this itself because it is no longer called once the
        # end of the file is reached.
if byte == None and oPDFEOF != None:
if oPDFEOF.token == '%%EOF':
oPDFEOF.cntEOFs += 1
oPDFEOF.cntCharsAfterLastEOF = 0
oPDFEOF.token = ''
except SystemExit:
sys.exit()
except:
attErrorOccured.nodeValue = 'True'
attErrorMessage.nodeValue = traceback.format_exc()
if disarm:
fOut.close()
attEntropyAll = xmlDoc.createAttribute('TotalEntropy')
xmlDoc.documentElement.setAttributeNode(attEntropyAll)
attCountAll = xmlDoc.createAttribute('TotalCount')
xmlDoc.documentElement.setAttributeNode(attCountAll)
attEntropyStream = xmlDoc.createAttribute('StreamEntropy')
xmlDoc.documentElement.setAttributeNode(attEntropyStream)
attCountStream = xmlDoc.createAttribute('StreamCount')
xmlDoc.documentElement.setAttributeNode(attCountStream)
attEntropyNonStream = xmlDoc.createAttribute('NonStreamEntropy')
xmlDoc.documentElement.setAttributeNode(attEntropyNonStream)
attCountNonStream = xmlDoc.createAttribute('NonStreamCount')
xmlDoc.documentElement.setAttributeNode(attCountNonStream)
if oEntropy != None:
(countAll, entropyAll, countStream, entropyStream, countNonStream, entropyNonStream) = oEntropy.calc()
attEntropyAll.nodeValue = '%f' % entropyAll
attCountAll.nodeValue = '%d' % countAll
if entropyStream == None:
attEntropyStream.nodeValue = 'N/A '
else:
attEntropyStream.nodeValue = '%f' % entropyStream
attCountStream.nodeValue = '%d' % countStream
attEntropyNonStream.nodeValue = '%f' % entropyNonStream
attCountNonStream.nodeValue = '%d' % countNonStream
else:
attEntropyAll.nodeValue = ''
attCountAll.nodeValue = ''
attEntropyStream.nodeValue = ''
attCountStream.nodeValue = ''
attEntropyNonStream.nodeValue = ''
attCountNonStream.nodeValue = ''
attCountEOF = xmlDoc.createAttribute('CountEOF')
xmlDoc.documentElement.setAttributeNode(attCountEOF)
attCountCharsAfterLastEOF = xmlDoc.createAttribute('CountCharsAfterLastEOF')
xmlDoc.documentElement.setAttributeNode(attCountCharsAfterLastEOF)
if oPDFEOF != None:
attCountEOF.nodeValue = '%d' % oPDFEOF.cntEOFs
if oPDFEOF.cntEOFs > 0:
attCountCharsAfterLastEOF.nodeValue = '%d' % oPDFEOF.cntCharsAfterLastEOF
else:
attCountCharsAfterLastEOF.nodeValue = ''
else:
attCountEOF.nodeValue = ''
attCountCharsAfterLastEOF.nodeValue = ''
eleKeywords = xmlDoc.createElement('Keywords')
xmlDoc.documentElement.appendChild(eleKeywords)
for keyword in keywords:
eleKeyword = xmlDoc.createElement('Keyword')
eleKeywords.appendChild(eleKeyword)
att = xmlDoc.createAttribute('Name')
att.nodeValue = keyword
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('Count')
att.nodeValue = str(words[keyword][0])
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('HexcodeCount')
att.nodeValue = str(words[keyword][1])
eleKeyword.setAttributeNode(att)
eleKeyword = xmlDoc.createElement('Keyword')
eleKeywords.appendChild(eleKeyword)
att = xmlDoc.createAttribute('Name')
att.nodeValue = '/Colors > 2^24'
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('Count')
att.nodeValue = str(oCVE_2009_3459.count)
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('HexcodeCount')
att.nodeValue = str(0)
eleKeyword.setAttributeNode(att)
if allNames:
keys = sorted(words.keys())
for word in keys:
if not word in keywords:
eleKeyword = xmlDoc.createElement('Keyword')
eleKeywords.appendChild(eleKeyword)
att = xmlDoc.createAttribute('Name')
att.nodeValue = word
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('Count')
att.nodeValue = str(words[word][0])
eleKeyword.setAttributeNode(att)
att = xmlDoc.createAttribute('HexcodeCount')
att.nodeValue = str(words[word][1])
eleKeyword.setAttributeNode(att)
eleDates = xmlDoc.createElement('Dates')
xmlDoc.documentElement.appendChild(eleDates)
dates.sort(key=lambda x: x[0])
for date in dates:
eleDate = xmlDoc.createElement('Date')
eleDates.appendChild(eleDate)
att = xmlDoc.createAttribute('Value')
att.nodeValue = date[0]
eleDate.setAttributeNode(att)
att = xmlDoc.createAttribute('Name')
att.nodeValue = date[1]
eleDate.setAttributeNode(att)
return xmlDoc
def PDFiD2String(xmlDoc, nozero, force):
result = 'PDFiD %s %s\n' % (xmlDoc.documentElement.getAttribute('Version'),
xmlDoc.documentElement.getAttribute('Filename'))
if xmlDoc.documentElement.getAttribute('ErrorOccured') == 'True':
        return result + '***Error occurred***\n%s\n' % xmlDoc.documentElement.getAttribute('ErrorMessage')
if not force and xmlDoc.documentElement.getAttribute('IsPDF') == 'False':
return result + ' Not a PDF document\n'
result += ' PDF Header: %s\n' % xmlDoc.documentElement.getAttribute('Header')
for node in xmlDoc.documentElement.getElementsByTagName('Keywords')[0].childNodes:
if not nozero or nozero and int(node.getAttribute('Count')) > 0:
result += ' %-16s %7d' % (node.getAttribute('Name'), int(node.getAttribute('Count')))
if int(node.getAttribute('HexcodeCount')) > 0:
result += '(%d)' % int(node.getAttribute('HexcodeCount'))
result += '\n'
if xmlDoc.documentElement.getAttribute('CountEOF') != '':
result += ' %-16s %7d\n' % ('%%EOF', int(xmlDoc.documentElement.getAttribute('CountEOF')))
if xmlDoc.documentElement.getAttribute('CountCharsAfterLastEOF') != '':
result += ' %-16s %7d\n' % ('After last %%EOF',
int(xmlDoc.documentElement.getAttribute('CountCharsAfterLastEOF')))
for node in xmlDoc.documentElement.getElementsByTagName('Dates')[0].childNodes:
result += ' %-23s %s\n' % (node.getAttribute('Value'), node.getAttribute('Name'))
if xmlDoc.documentElement.getAttribute('TotalEntropy') != '':
result += ' Total entropy: %s (%10s bytes)\n' % (xmlDoc.documentElement.getAttribute('TotalEntropy'),
xmlDoc.documentElement.getAttribute('TotalCount'))
if xmlDoc.documentElement.getAttribute('StreamEntropy') != '':
result += ' Entropy inside streams: %s (%10s bytes)\n' % (xmlDoc.documentElement.getAttribute('StreamEntropy'),
xmlDoc.documentElement.getAttribute('StreamCount'))
if xmlDoc.documentElement.getAttribute('NonStreamEntropy') != '':
result += ' Entropy outside streams: %s (%10s bytes)\n' % (xmlDoc.documentElement.getAttribute(
'NonStreamEntropy'), xmlDoc.documentElement.getAttribute('NonStreamCount'))
return result
class cCount():
def __init__(self, count, hexcode):
self.count = count
self.hexcode = hexcode
class cPDFiD():
def __init__(self, xmlDoc, force):
self.version = xmlDoc.documentElement.getAttribute('Version')
self.filename = xmlDoc.documentElement.getAttribute('Filename')
self.errorOccured = xmlDoc.documentElement.getAttribute('ErrorOccured') == 'True'
self.errorMessage = xmlDoc.documentElement.getAttribute('ErrorMessage')
self.isPDF = None
if self.errorOccured:
return
self.isPDF = xmlDoc.documentElement.getAttribute('IsPDF') == 'True'
if not force and not self.isPDF:
return
self.header = xmlDoc.documentElement.getAttribute('Header')
self.keywords = {}
for node in xmlDoc.documentElement.getElementsByTagName('Keywords')[0].childNodes:
self.keywords[node.getAttribute('Name')] = cCount(
int(node.getAttribute('Count')), int(node.getAttribute('HexcodeCount')))
self.obj = self.keywords['obj']
self.endobj = self.keywords['endobj']
self.stream = self.keywords['stream']
self.endstream = self.keywords['endstream']
self.xref = self.keywords['xref']
self.trailer = self.keywords['trailer']
self.startxref = self.keywords['startxref']
self.page = self.keywords['/Page']
self.encrypt = self.keywords['/Encrypt']
self.objstm = self.keywords['/ObjStm']
self.js = self.keywords['/JS']
self.javascript = self.keywords['/JavaScript']
self.aa = self.keywords['/AA']
self.openaction = self.keywords['/OpenAction']
self.acroform = self.keywords['/AcroForm']
self.jbig2decode = self.keywords['/JBIG2Decode']
self.richmedia = self.keywords['/RichMedia']
self.launch = self.keywords['/Launch']
self.embeddedfile = self.keywords['/EmbeddedFile']
self.xfa = self.keywords['/XFA']
self.colors_gt_2_24 = self.keywords['/Colors > 2^24']
        try:
            self.non_stream_entropy = float(
                xmlDoc.documentElement.getAttribute('NonStreamEntropy'))
        except ValueError:
            # the attribute is an empty string when extra data was not collected
            self.non_stream_entropy = None
        try:
            self.last_eof_bytes = int(
                xmlDoc.documentElement.getAttribute('CountCharsAfterLastEOF'))
        except ValueError:
            self.last_eof_bytes = 0
def Print(lines, options):
print(lines)
filename = None
if options.scan:
filename = 'PDFiD.log'
if options.output != '':
filename = options.output
if filename:
logfile = open(filename, 'a')
logfile.write(lines + '\n')
logfile.close()
def Quote(value, separator, quote):
if isinstance(value, str):
if separator in value:
return quote + value + quote
return value
def MakeCSVLine(fields, separator=';', quote='"'):
formatstring = separator.join([field[0] for field in fields])
strings = [Quote(field[1], separator, quote) for field in fields]
return formatstring % tuple(strings)
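# Example: values containing the separator get quoted before the
# format string is applied:
#
#     MakeCSVLine((('%s', 'a;b'), ('%.02f', 1.5)))
#     # -> '"a;b";1.50'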
def ProcessFile(filename, options, plugins):
xmlDoc = PDFiD(filename, options.all, options.extra, options.disarm, options.force)
if plugins == [] and options.select == '':
Print(PDFiD2String(xmlDoc, options.nozero, options.force), options)
return
oPDFiD = cPDFiD(xmlDoc, options.force)
if options.select:
if options.force or not oPDFiD.errorOccured and oPDFiD.isPDF:
pdf = oPDFiD
try:
selected = eval(options.select)
except Exception as e:
Print('Error evaluating select expression: %s' % options.select, options)
if options.verbose:
raise e
return
if selected:
if options.csv:
Print(filename, options)
else:
Print(PDFiD2String(xmlDoc, options.nozero, options.force), options)
else:
for cPlugin in plugins:
if not cPlugin.onlyValidPDF or not oPDFiD.errorOccured and oPDFiD.isPDF:
try:
oPlugin = cPlugin(oPDFiD, options.pluginoptions)
except Exception as e:
Print('Error instantiating plugin: %s' % cPlugin.name, options)
if options.verbose:
raise e
return
try:
score = oPlugin.Score()
except Exception as e:
Print('Error running plugin: %s' % cPlugin.name, options)
if options.verbose:
raise e
return
if options.csv:
if score >= options.minimumscore:
Print(MakeCSVLine((('%s', filename), ('%s', cPlugin.name), ('%.02f', score))), options)
else:
if score >= options.minimumscore:
Print(PDFiD2String(xmlDoc, options.nozero, options.force), options)
Print('%s score: %.02f' % (cPlugin.name, score), options)
try:
Print('%s instructions: %s' % (cPlugin.name, oPlugin.Instructions(score)), options)
except AttributeError:
pass
else:
if options.csv:
if oPDFiD.errorOccured:
                        Print(MakeCSVLine((('%s', filename), ('%s', cPlugin.name), ('%s', 'Error occurred'))), options)
if not oPDFiD.isPDF:
Print(
MakeCSVLine((('%s', filename), ('%s', cPlugin.name), ('%s', 'Not a PDF document'))),
options)
else:
Print(PDFiD2String(xmlDoc, options.nozero, options.force), options)
def Scan(directory, options, plugins):
try:
if os.path.isdir(directory):
for entry in os.listdir(directory):
Scan(os.path.join(directory, entry), options, plugins)
else:
ProcessFile(directory, options, plugins)
except Exception as e:
# print directory
print(e)
# print(sys.exc_info()[2])
# print traceback.format_exc()
#function derived from: http://blog.9bplus.com/pdfidpy-output-to-json
def PDFiD2JSON(xmlDoc, force):
#Get Top Layer Data
errorOccured = xmlDoc.documentElement.getAttribute('ErrorOccured')
errorMessage = xmlDoc.documentElement.getAttribute('ErrorMessage')
filename = xmlDoc.documentElement.getAttribute('Filename')
header = xmlDoc.documentElement.getAttribute('Header')
isPdf = xmlDoc.documentElement.getAttribute('IsPDF')
version = xmlDoc.documentElement.getAttribute('Version')
entropy = xmlDoc.documentElement.getAttribute('Entropy')
#extra data
countEof = xmlDoc.documentElement.getAttribute('CountEOF')
    countCharsAfterLastEof = xmlDoc.documentElement.getAttribute('CountCharsAfterLastEOF')
totalEntropy = xmlDoc.documentElement.getAttribute('TotalEntropy')
streamEntropy = xmlDoc.documentElement.getAttribute('StreamEntropy')
nonStreamEntropy = xmlDoc.documentElement.getAttribute('NonStreamEntropy')
keywords = []
dates = []
#grab all keywords
for node in xmlDoc.documentElement.getElementsByTagName('Keywords')[0].childNodes:
name = node.getAttribute('Name')
count = int(node.getAttribute('Count'))
if int(node.getAttribute('HexcodeCount')) > 0:
hexCount = int(node.getAttribute('HexcodeCount'))
else:
hexCount = 0
keyword = {'count': count, 'hexcodecount': hexCount, 'name': name}
keywords.append(keyword)
#grab all date information
for node in xmlDoc.documentElement.getElementsByTagName('Dates')[0].childNodes:
name = node.getAttribute('Name')
value = node.getAttribute('Value')
date = {'name': name, 'value': value}
dates.append(date)
data = {
'countEof': countEof,
        'countCharsAfterLastEof': countCharsAfterLastEof,
'totalEntropy': totalEntropy,
'streamEntropy': streamEntropy,
'nonStreamEntropy': nonStreamEntropy,
'errorOccured': errorOccured,
'errorMessage': errorMessage,
'filename': filename,
'header': header,
'isPdf': isPdf,
'version': version,
'entropy': entropy,
'keywords': {
'keyword': keywords
},
'dates': {
'date': dates
}
}
complete = [{'pdfid': data}]
result = json.dumps(complete)
return result
def File2Strings(filename):
try:
f = open(filename, 'r')
except:
return None
try:
return list(map(lambda line: line.rstrip('\n'), f.readlines()))
except:
return None
finally:
f.close()
def ProcessAt(argument):
if argument.startswith('@'):
strings = File2Strings(argument[1:])
if strings == None:
raise Exception('Error reading %s' % argument)
else:
return strings
else:
return [argument]
def AddPlugin(cClass):
global plugins
plugins.append(cClass)
def ExpandFilenameArguments(filenames):
return list(collections.OrderedDict.fromkeys(sum(map(glob.glob, sum(map(ProcessAt, filenames), [])), [])))
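# Example: ExpandFilenameArguments(['@list.txt', '*.pdf']) reads one
# filename per line from list.txt, expands the *.pdf glob, and returns
# the combined list de-duplicated while preserving order.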
class cPluginParent():
onlyValidPDF = True
def LoadPlugins(plugins, verbose):
if plugins == '':
return
scriptPath = os.path.dirname(sys.argv[0])
for plugin in sum(map(ProcessAt, plugins.split(',')), []):
try:
if not plugin.lower().endswith('.py'):
plugin += '.py'
if os.path.dirname(plugin) == '':
if not os.path.exists(plugin):
scriptPlugin = os.path.join(scriptPath, plugin)
if os.path.exists(scriptPlugin):
plugin = scriptPlugin
exec(open(plugin, 'r').read())
except Exception as e:
print('Error loading plugin: %s' % plugin)
if verbose:
raise e
def PDFiDMain(filenames, options):
global plugins
plugins = []
LoadPlugins(options.plugins, options.verbose)
if options.csv:
if plugins != []:
Print(MakeCSVLine((('%s', 'Filename'), ('%s', 'Plugin-name'), ('%s', 'Score'))), options)
elif options.select != '':
Print('Filename', options)
for filename in filenames:
if options.scan:
Scan(filename, options, plugins)
else:
ProcessFile(filename, options, plugins)
def Main():
moredesc = '''
Arguments:
pdf-file and zip-file can be a single file, several files, and/or @file
@file: run PDFiD on each file listed in the text file specified
wildcards are supported
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
oParser = optparse.OptionParser(
usage='usage: %prog [options] [pdf-file|zip-file|url|@file] ...\n' + __description__ + moredesc,
version='%prog ' + __version__)
oParser.add_option('-s', '--scan', action='store_true', default=False, help='scan the given directory')
oParser.add_option('-a', '--all', action='store_true', default=False, help='display all the names')
oParser.add_option('-e', '--extra', action='store_true', default=False, help='display extra data, like dates')
oParser.add_option(
'-f',
'--force',
action='store_true',
default=False,
help='force the scan of the file, even without proper %PDF header')
oParser.add_option('-d', '--disarm', action='store_true', default=False, help='disable JavaScript and auto launch')
oParser.add_option(
'-p',
'--plugins',
type=str,
default='',
help='plugins to load (separate plugins with a comma , ; @file supported)')
oParser.add_option('-c', '--csv', action='store_true', default=False, help='output csv data when using plugins')
oParser.add_option('-m', '--minimumscore', type=float, default=0.0, help='minimum score for plugin results output')
oParser.add_option(
        '-v', '--verbose', action='store_true', default=False, help='verbose (will also re-raise caught exceptions)')
oParser.add_option('-S', '--select', type=str, default='', help='selection expression')
oParser.add_option(
        '-n', '--nozero', action='store_true', default=False, help='suppress output for counts equal to zero')
oParser.add_option('-o', '--output', type=str, default='', help='output to log file')
oParser.add_option('--pluginoptions', type=str, default='', help='options for the plugin')
oParser.add_option(
'-l', '--literal', action='store_true', default=False, help='take filenames literally, no wildcards')
(options, args) = oParser.parse_args()
if len(args) == 0:
if options.disarm:
print('Option disarm not supported with stdin')
options.disarm = False
if options.scan:
print('Option scan not supported with stdin')
options.scan = False
filenames = ['']
elif options.literal:
filenames = args
else:
try:
filenames = ExpandFilenameArguments(args)
except Exception as e:
print(e)
return
PDFiDMain(filenames, options)
if __name__ == '__main__':
Main()
|
|
"""
Ax_Metrics - Test io.emfetch 'http' plugin
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import pytest
import axonchisel.metrics.foundation.chrono.timerange as timerange
import axonchisel.metrics.foundation.metricdef.metricdef as metricdef
import axonchisel.metrics.foundation.metricdef.filters as filters
import axonchisel.metrics.foundation.metricdef.mdefl as mdefl
import axonchisel.metrics.io.emfetch.plugins.emf_http as emf_http
from .util import dt, load_test_asset, log_config
import logging
# ----------------------------------------------------------------------------
# Use a mock requests lib to simulate all HTTP requests?
# Set to False to allow some real requests, which require an actual API
# endpoint (customized via extinfo).
# Even when False, many requests in this test suite are still mocked.
MOCK_REQUESTS = True
# ----------------------------------------------------------------------------
def setup_module(module):
# log_config(level=logging.DEBUG)
log_config(level=logging.INFO)
# ----------------------------------------------------------------------------
class TestEMFetcher_http(object):
"""
Test EMFetcher 'http'.
"""
#
# Setup / Teardown
#
def setup_method(self, method):
self.extinfo = {
'api_url': 'http://localhost/apidemo/kpi_reduce/',
'api_key': 'TestKey',
# 'table_prefix': 'my_',
}
# Parse MDefL MetSet:
self.yaml_metset1 = load_test_asset('metset-http.yml')
self.parser1 = mdefl.MetSetParser()
self.metset1 = self.parser1.parse_ystr_metset(self.yaml_metset1)
#
# Tests
#
def test_maybe_real(self):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
if MOCK_REQUESTS:
self._mock_requests(emfetch1, '{ "body": { "result": 12345 } }')
emfetch1.plugin_create()
for x in range(1):
tmrange = timerange.TimeRange(
inc_begin=dt('2013-02-01'), exc_end=dt('2013-02-02'))
dpoint1 = emfetch1.fetch(tmrange)
            print(dpoint1)
tmrange = timerange.TimeRange(
inc_begin=dt('2013-02-02'), exc_end=dt('2013-02-03'))
dpoint1 = emfetch1.fetch(tmrange)
            print(dpoint1)
emfetch1.plugin_destroy()
def test_mock_good(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": 12345 } }')
self._run_emfetch(emfetch1, tmranges)
def test_mock_good_null(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": null } }')
self._run_emfetch(emfetch1, tmranges)
def test_mock_good_types(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": 12345.6789 } }')
mdef1.data_type = 'NUM_INT'
self._run_emfetch(emfetch1, tmranges)
mdef1.data_type = 'MONEY_INT100'
self._run_emfetch(emfetch1, tmranges)
mdef1.data_type = 'MONEY_FLOAT100'
self._run_emfetch(emfetch1, tmranges)
def test_mock_good_isolate(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
mdef1.emfetch_opts['options']['isolate'] = True
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": 12345 } }')
self._run_emfetch(emfetch1, tmranges)
def test_mock_good_http_post(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
mdef1.emfetch_opts['request']['method'] = 'POST'
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": 12345 } }')
self._run_emfetch(emfetch1, tmranges)
def test_mock_bad_response_format(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
mdef1.emfetch_opts['response']['format'] = 'BogusFormat'
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, 'Invalid JSON { XXX')
with pytest.raises(ValueError) as e:
self._run_emfetch(emfetch1, tmranges)
assert "response format" in str(e.value)
def test_mock_bad_json(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, 'Invalid JSON { XXX')
with pytest.raises(ValueError) as e:
self._run_emfetch(emfetch1, tmranges)
assert "not valid JSON" in str(e.value)
def test_mock_bad_json_content(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "dummy": "dummy" }')
with pytest.raises(KeyError) as e:
self._run_emfetch(emfetch1, tmranges)
assert "not found in 'JSON" in str(e.value)
def test_mock_bad_http_error(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, error="Fake HTTP Error")
with pytest.raises(ValueError) as e:
self._run_emfetch(emfetch1, tmranges)
assert "Fake HTTP Error" in str(e.value)
def test_mock_bad_http_method(self, tmranges):
mdef1 = self.metset1.get_metric_by_id('rev_new_sales')
mdef1.emfetch_opts['request']['method'] = 'BogusMethod'
emfetch1 = emf_http.EMFetcher_http(mdef1, extinfo=self.extinfo)
self._mock_requests(emfetch1, '{ "body": { "result": 12345 } }')
with pytest.raises(ValueError) as e:
self._run_emfetch(emfetch1, tmranges)
assert "HTTP method" in str(e.value)
#
# Internal Helpers
#
def _run_emfetch(self, emfetch1, tmranges):
"""
        Run EMFetch through multiple TimeRanges, returning a list of
        DataPoints. Wrapped in plugin_create() / plugin_destroy() calls.
"""
dpoints = list()
emfetch1.plugin_create()
for tmrange in tmranges[1:3]:
dpoint1 = emfetch1.fetch(tmrange)
dpoints.append(dpoint1)
emfetch1.plugin_destroy()
return dpoints
def _mock_requests(self, emfetch1, resp_text="Response", error=None):
"""
Inject mock version of requests dependency into given EMFetcher_http.
Will simulate HTTP call and return resp_text.
"""
mockRequests = MockRequests(resp_text=resp_text, error=error)
emfetch1._use_requests_lib(mockRequests)
# ----------------------------------------------------------------------------
class MockRequests(object):
"""
A light mock version of requests lib.
Only implements what is used by this test.
"""
def __init__(self, resp_text="Response", error=None):
"""
Init with specific text to respond or optional error msg to raise.
"""
self.resp_text = resp_text
self.error = error
def get(self, url, params, **kwargs):
resp = MockRequestsResponse(self)
return resp
def post(self, url, data, **kwargs):
resp = MockRequestsResponse(self)
return resp
def close(self):
pass
def Session(self):
return self
class MockRequestsResponse(object):
"""
A mock version of requests lib response obj.
Only implements what is used by this test.
"""
def __init__(self, mock_requests):
self.mock_requests = mock_requests
@property
def text(self):
return self.mock_requests.resp_text
def raise_for_status(self):
if self.mock_requests.error:
raise ValueError(
"Mock Request Error: %s" % self.mock_requests.error)
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from oslo_config import cfg
import testscenarios
import oslo_messaging
from oslo_messaging.tests import utils as test_utils
from six.moves import mock
load_tests = testscenarios.load_tests_apply_scenarios
class ServerSetupMixin(object):
class Server(object):
def __init__(self, transport, topic, server, endpoint, serializer):
target = oslo_messaging.Target(topic=topic, server=server)
self._server = oslo_messaging.get_rpc_server(transport,
target,
[endpoint, self],
serializer=serializer)
def stop(self, ctxt):
# Check start() does nothing with a running server
self._server.start()
self._server.stop()
self._server.wait()
def start(self):
self._server.start()
class TestSerializer(object):
def serialize_entity(self, ctxt, entity):
return ('s' + entity) if entity else entity
def deserialize_entity(self, ctxt, entity):
return ('d' + entity) if entity else entity
def serialize_context(self, ctxt):
return dict([(k, 's' + v) for k, v in ctxt.items()])
def deserialize_context(self, ctxt):
return dict([(k, 'd' + v) for k, v in ctxt.items()])
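    # With this serializer, a round trip like client.call({}, 'ping',
    # arg='foo') goes: the client serializes 'foo' -> 'sfoo', the server
    # deserializes it -> 'dsfoo' (what the endpoint sees), the echoed
    # return value is serialized -> 'sdsfoo', and the client finally
    # deserializes it -> 'dsdsfoo'. This is why the tests below expect
    # the 'ds'/'dsds' prefixes.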
def __init__(self):
self.serializer = self.TestSerializer()
def _setup_server(self, transport, endpoint, topic=None, server=None):
server = self.Server(transport,
topic=topic or 'testtopic',
server=server or 'testserver',
endpoint=endpoint,
serializer=self.serializer)
thread = threading.Thread(target=server.start)
thread.daemon = True
thread.start()
return thread
def _stop_server(self, client, server_thread, topic=None):
if topic is not None:
client = client.prepare(topic=topic)
client.cast({}, 'stop')
server_thread.join(timeout=30)
def _setup_client(self, transport, topic='testtopic'):
return oslo_messaging.RPCClient(transport,
oslo_messaging.Target(topic=topic),
serializer=self.serializer)
class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin):
def __init__(self, *args):
super(TestRPCServer, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts())
def test_constructor(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
serializer=serializer)
self.assertIs(server.conf, self.conf)
self.assertIs(server.transport, transport)
self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher)
self.assertIs(server.dispatcher.endpoints, endpoints)
self.assertIs(server.dispatcher.serializer, serializer)
self.assertEqual('blocking', server.executor)
def test_server_wait_method(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
serializer=serializer)
# Mocking executor
server._executor = mock.Mock()
        # Assign the executor's listener object to a variable before
        # calling the wait method, because wait() sets the executor
        # to None.
listener = server._executor.listener
# call server wait method
server.wait()
self.assertIsNone(server._executor)
self.assertEqual(1, listener.cleanup.call_count)
def test_no_target_server(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
server = oslo_messaging.get_rpc_server(
transport,
oslo_messaging.Target(topic='testtopic'),
[])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testtopic', ex.target.topic)
else:
self.assertTrue(False)
def test_no_server_topic(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(server='testserver')
server = oslo_messaging.get_rpc_server(transport, target, [])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testserver', ex.target.server)
else:
self.assertTrue(False)
def _test_no_client_topic(self, call=True):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
client = self._setup_client(transport, topic=None)
method = client.call if call else client.cast
try:
method({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertIsNotNone(ex.target)
else:
self.assertTrue(False)
def test_no_client_topic_call(self):
self._test_no_client_topic(call=True)
def test_no_client_topic_cast(self):
self._test_no_client_topic(call=False)
def test_client_call_timeout(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
finished = False
wait = threading.Condition()
class TestEndpoint(object):
def ping(self, ctxt, arg):
with wait:
if not finished:
wait.wait()
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.prepare(timeout=0).call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex)
else:
self.assertTrue(False)
with wait:
finished = True
wait.notify()
self._stop_server(client, server_thread)
def test_unknown_executor(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
try:
oslo_messaging.get_rpc_server(transport, None, [], executor='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure)
self.assertEqual('foo', ex.executor)
else:
self.assertTrue(False)
def test_cast(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
endpoint = TestEndpoint()
server_thread = self._setup_server(transport, endpoint)
client = self._setup_client(transport)
client.cast({}, 'ping', arg='foo')
client.cast({}, 'ping', arg='bar')
self._stop_server(client, server_thread)
self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings)
def test_call(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertIsNone(client.call({}, 'ping', arg=None))
self.assertEqual(0, client.call({}, 'ping', arg=0))
self.assertEqual(False, client.call({}, 'ping', arg=False))
self.assertEqual([], client.call({}, 'ping', arg=[]))
self.assertEqual({}, client.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_direct_call(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
direct = client.prepare(server='testserver')
self.assertIsNone(direct.call({}, 'ping', arg=None))
        self.assertEqual(0, direct.call({}, 'ping', arg=0))
        self.assertEqual(False, direct.call({}, 'ping', arg=False))
        self.assertEqual([], direct.call({}, 'ping', arg=[]))
        self.assertEqual({}, direct.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_context(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ctxt_check(self, ctxt, key):
return ctxt[key]
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertEqual('dsdsb',
client.call({'dsa': 'b'},
'ctxt_check',
key='a'))
self._stop_server(client, server_thread)
def test_failure(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
raise ValueError(arg)
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
def test_expected_failure(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
@oslo_messaging.expected_exceptions(ValueError)
def ping(self, ctxt, arg):
raise ValueError(arg)
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):
_exchanges = [
('same_exchange', dict(exchange1=None, exchange2=None)),
('diff_exchange', dict(exchange1='x1', exchange2='x2')),
]
_topics = [
('same_topic', dict(topic1='t', topic2='t')),
('diff_topic', dict(topic1='t1', topic2='t2')),
]
_server = [
('same_server', dict(server1=None, server2=None)),
('diff_server', dict(server1='s1', server2='s2')),
]
_fanout = [
('not_fanout', dict(fanout1=None, fanout2=None)),
('fanout', dict(fanout1=True, fanout2=True)),
]
_method = [
('call', dict(call1=True, call2=True)),
('cast', dict(call1=False, call2=False)),
]
_endpoints = [
('one_endpoint',
dict(multi_endpoints=False,
expect1=['ds1', 'ds2'],
expect2=['ds1', 'ds2'])),
('two_endpoints',
dict(multi_endpoints=True,
expect1=['ds1'],
expect2=['ds2'])),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
cls._topics,
cls._server,
cls._fanout,
cls._method,
cls._endpoints)
# fanout call not supported
def filter_fanout_call(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
call = params['call1'] or params['call2']
return not (call and fanout)
# listening multiple times on same topic/server pair not supported
def filter_same_topic_and_server(scenario):
params = scenario[1]
single_topic = params['topic1'] == params['topic2']
single_server = params['server1'] == params['server2']
return not (single_topic and single_server)
# fanout to multiple servers on same topic and exchange
# each endpoint will receive both messages
def fanout_to_servers(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
multi_servers = params['server1'] != params['server2']
if fanout and single_exchange and single_topic and multi_servers:
params['expect1'] = params['expect1'][:] + params['expect1']
params['expect2'] = params['expect2'][:] + params['expect2']
return scenario
# multiple endpoints on same topic and exchange
# either endpoint can get either message
def single_topic_multi_endpoints(scenario):
params = scenario[1]
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
if single_topic and single_exchange and params['multi_endpoints']:
params['expect_either'] = (params['expect1'] +
params['expect2'])
params['expect1'] = params['expect2'] = []
else:
params['expect_either'] = []
return scenario
        # wrap in list() so the scenarios survive repeated iteration on
        # Python 3, where filter/map return one-shot iterators
        for f in [filter_fanout_call, filter_same_topic_and_server]:
            cls.scenarios = list(filter(f, cls.scenarios))
        for m in [fanout_to_servers, single_topic_multi_endpoints]:
            cls.scenarios = list(map(m, cls.scenarios))
def __init__(self, *args):
super(TestMultipleServers, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts())
def test_multiple_servers(self):
url1 = 'fake:///' + (self.exchange1 or '')
url2 = 'fake:///' + (self.exchange2 or '')
transport1 = oslo_messaging.get_transport(self.conf, url=url1)
if url1 != url2:
            transport2 = oslo_messaging.get_transport(self.conf, url=url2)
else:
transport2 = transport1
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
def alive(self, ctxt):
return 'alive'
if self.multi_endpoints:
endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
else:
endpoint1 = endpoint2 = TestEndpoint()
thread1 = self._setup_server(transport1, endpoint1,
topic=self.topic1, server=self.server1)
thread2 = self._setup_server(transport2, endpoint2,
topic=self.topic2, server=self.server2)
client1 = self._setup_client(transport1, topic=self.topic1)
client2 = self._setup_client(transport2, topic=self.topic2)
client1 = client1.prepare(server=self.server1)
client2 = client2.prepare(server=self.server2)
if self.fanout1:
client1.call({}, 'alive')
client1 = client1.prepare(fanout=True)
if self.fanout2:
client2.call({}, 'alive')
client2 = client2.prepare(fanout=True)
(client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
(client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')
self.assertTrue(thread1.isAlive())
self._stop_server(client1.prepare(fanout=None),
thread1, topic=self.topic1)
self.assertTrue(thread2.isAlive())
self._stop_server(client2.prepare(fanout=None),
thread2, topic=self.topic2)
def check(pings, expect):
self.assertEqual(len(expect), len(pings))
for a in expect:
self.assertIn(a, pings)
if self.expect_either:
check(endpoint1.pings + endpoint2.pings, self.expect_either)
else:
check(endpoint1.pings, self.expect1)
check(endpoint2.pings, self.expect2)
TestMultipleServers.generate_scenarios()
|
|
# These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
from __future__ import division, absolute_import, print_function
import os
import doctest
import inspect
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
from .nosetester import get_package_name
from .utils import KnownFailureException, KnownFailureTest
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
return module.__name__ == object.__module__
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import (
isroutine, isclass, ismodule, isfunction, ismethod
)
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val)):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want = want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'" % sz, "int")
want = want.replace("'<i%d'" % sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
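# For example, a doctest whose expected output carries the marker
#     >>> np.random.rand(2)
#     array([ 0.615,  0.239])  #random
# fails the literal comparison, but the "#random" tag in the expected
# output makes this checker accept whatever was actually produced.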
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin above would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger(object):
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailurePlugin(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailure is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureException,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailure '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
KnownFailure = KnownFailurePlugin # backwards compat
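# Usage sketch: when this plugin is active, a test that raises
# KnownFailureException is reported as 'K'/'KNOWNFAIL' instead of an
# error, e.g.:
#
#   def test_not_fixed_yet():
#       raise KnownFailureException("known regression, fix pending")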
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
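# Usage sketch (hypothetical wiring; nosetester.py does the real thing):
# the pieces above can be combined to run numpy-style doctests directly:
#
#   argv = ['nosetests', '--with-numpydoctest']
#   plugins = [NumpyDoctest(), KnownFailurePlugin(), Unplugger()]
#   prog = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
#   success = prog.result.wasSuccessful()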
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Plex Server Manager by RogueProeliator <rp@rogueproeliator.com>
# See plugin.py for more plugin details and information
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Python imports
#/////////////////////////////////////////////////////////////////////////////////////////
import httplib
import re
import requests
import time
import urllib2
import xml.etree.ElementTree
import indigo
import RPFramework
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants
#/////////////////////////////////////////////////////////////////////////////////////////
MEDIACONTAINERTYPE_UNKNOWN = 0
MEDIACONTAINERTYPE_SERVERNODE = 1
MEDIACONTAINERTYPE_CLIENTLIST = 2
MEDIACONTAINERTYPE_SESSIONLIST = 3
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# PlexMediaContainer
# Handles the XML-based MediaContainer element that all of the Plex Media Server API
# calls return
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class PlexMediaContainer(object):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor allows passing in the XML data that has a MediaContainer at its root
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def __init__(self, mediaContainerXml, plexContainerPath):
# setup the basic properties that we will populate for external use
self.containerAttributes = dict()
self.directories = list()
self.clients = list()
self.videoSessions = list()
# based upon the request path we can determine what type of information
# this container holds...
if plexContainerPath == u'/':
self.containerType = MEDIACONTAINERTYPE_SERVERNODE
elif plexContainerPath == u'/clients':
self.containerType = MEDIACONTAINERTYPE_CLIENTLIST
elif plexContainerPath == u'/status/sessions':
self.containerType = MEDIACONTAINERTYPE_SESSIONLIST
else:
self.containerType = MEDIACONTAINERTYPE_UNKNOWN
# parse the XML provided...
mediaContainerNode = xml.etree.ElementTree.fromstring(RPFramework.RPFrameworkUtils.to_str(mediaContainerXml))
# the root container node will have a bunch of attributes which should be loaded into
# our attributes container
for key,value in mediaContainerNode.items():
self.containerAttributes[key] = value
# retrieve the list of directories that may be content of the media container
# node
for directoryNode in mediaContainerNode.findall(u'Directory'):
self.directories.append(PlexMediaContainerDirectory(directoryNode))
# retrieve the list of clients that may be content of the media container
# node (these are connected clients, not necessarily streaming now)
if self.containerType == MEDIACONTAINERTYPE_CLIENTLIST:
for clientNode in mediaContainerNode.findall(u'Server'):
self.clients.append(PlexMediaClient(clientNode))
# the session status requires special handling - it will have a Video node along with
# embedded player and media information nodes
if self.containerType == MEDIACONTAINERTYPE_SESSIONLIST:
for video in mediaContainerNode.findall(u'Video'):
self.videoSessions.append(PlexMediaContainerVideoSession(video))
for audio in mediaContainerNode.findall(u'Track'):
self.videoSessions.append(PlexMediaContainerVideoSession(audio))
mediaContainerNode.clear()
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# PlexMediaContainerDirectory
# Stores information about a "directory" element within the Plex API... this is
# basically any element whose hierarchy can be drilled into more deeply
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class PlexMediaContainerDirectory(object):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor allows passing in the XML node of which the directory is its root
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def __init__(self, dictionaryXmlNode):
# we will keep a copy of the attributes of the directory as these are essentially
# "properties" of the object
self.dictionaryAttributes = dict()
self.genreList = []
# the root container node will have a bunch of attributes which should be loaded into
# our attributes container
loadXmlElementToDictionary(dictionaryXmlNode, self.dictionaryAttributes)
# there may be child Genre nodes...
for genreNode in dictionaryXmlNode.findall("Genre"):
self.genreList.append(genreNode.get("tag"))
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# PlexMediaClient
# Stores information about a "Server" element within the Plex API... this is
# basically a connected client
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class PlexMediaClient(object):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor allows passing in the XML node of which the directory is its root
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def __init__(self, serverXmlNode):
# we will keep a copy of the attributes of the client's "Server" node as these
# are essentially "properties" of the object
self.clientAttributes = dict()
# the root container node will have a bunch of attributes which should be loaded into
# our attributes container
loadXmlElementToDictionary(serverXmlNode, self.clientAttributes)
#/////////////////////////////////////////////////////////////////////////////////////
# Public Utilities
#/////////////////////////////////////////////////////////////////////////////////////
def getClientId(self):
return RPFramework.RPFrameworkUtils.to_unicode(self.clientAttributes.get("machineIdentifier", ""))
def getClientName(self):
return RPFramework.RPFrameworkUtils.to_unicode(self.clientAttributes.get("product", ""))
def getClientAddress(self):
return RPFramework.RPFrameworkUtils.to_unicode(self.clientAttributes.get("address", ""))
def getClientPort(self):
return int(RPFramework.RPFrameworkUtils.to_unicode(self.clientAttributes.get("port", "0")))
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# PlexMediaContainerVideoSession
# Stores information about a video that is being served in-session (as obtained from
# the session status request)
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class PlexMediaContainerVideoSession(object):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor allows passing in the XML node of which the video is its root
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def __init__(self, videoXmlNode):
self.videoAttributes = dict()
self.userInfo = dict()
self.playerInfo = dict()
self.mediaInfo = dict()
self.genreList = []
# the root Video node will have a bunch of attributes which should be loaded into
# our attributes container
loadXmlElementToDictionary(videoXmlNode, self.videoAttributes)
# there may be a "User" node if the session is not an anonymous session; if so then
# load all of the user's details into our user dictionary
userXmlNode = videoXmlNode.find("User")
if userXmlNode is not None:
loadXmlElementToDictionary(userXmlNode, self.userInfo)
# there should be a Player node that identifies what client/player is doing the
# streaming... load all of its properties in the appropriate dictionary
playerXmlNode = videoXmlNode.find("Player")
if playerXmlNode is not None:
loadXmlElementToDictionary(playerXmlNode, self.playerInfo)
# there may be specific media information that we should read
mediaXmlNode = videoXmlNode.find("Media")
if mediaXmlNode is not None:
loadXmlElementToDictionary(mediaXmlNode, self.mediaInfo)
if videoXmlNode.tag == u'Track':
# a separate call to the parent (album) must be made to get the genre list
pass
else:
# there may be multiple genre nodes associated with this media, list the name of each ("tag")
for genreNode in videoXmlNode.findall(u'Genre'):
self.genreList.append(genreNode.get(u'tag'))
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Static Utility Routines
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
def loadXmlElementToDictionary(xmlElement, targetDict):
for key,value in xmlElement.items():
targetDict[key] = value
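# Usage sketch (the XML below is a hand-written sample, not captured from a
# real server): parsing the /status/sessions response into session objects
#
#   sampleXml = ('<MediaContainer size="1">'
#                '<Video title="Demo"><User title="jdoe" />'
#                '<Player state="playing" /></Video>'
#                '</MediaContainer>')
#   container = PlexMediaContainer(sampleXml, u'/status/sessions')
#   container.videoSessions[0].playerInfo["state"]   # -> "playing"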
|
|
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, ne_
import sys
import time
import threading
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy import create_engine, MetaData, INT, VARCHAR, Sequence, \
select, Integer, String, func, text, exc
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
users, metadata = None, None
class TransactionTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global users, metadata
metadata = MetaData()
users = Table('query_users', metadata,
Column('user_id', INT, primary_key = True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True,
)
users.create(testing.db)
def teardown(self):
testing.db.execute(users.delete()).close()
@classmethod
@testing.crashes('mysql+cymysql', 'deadlock')
def teardown_class(cls):
users.drop(testing.db)
def test_commits(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 3
transaction.commit()
connection.close()
def test_rollback(self):
"""test a basic rollback"""
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_raise(self):
connection = testing.db.connect()
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=1, user_name='user3')
transaction.commit()
assert False
except Exception, e:
print "Exception: ", e
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_transaction_container(self):
def go(conn, table, data):
for d in data:
conn.execute(table.insert(), d)
testing.db.transaction(go, users, [dict(user_id=1,
user_name='user1')])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
assert_raises(exc.DBAPIError, testing.db.transaction, go,
users, [{'user_id': 2, 'user_name': 'user2'},
{'user_id': 1, 'user_name': 'user3'}])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
def test_nested_rollback(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1,
user_name='user1')
connection.execute(users.insert(), user_id=2,
user_name='user2')
connection.execute(users.insert(), user_id=3,
user_name='user3')
trans2 = connection.begin()
try:
connection.execute(users.insert(), user_id=4,
user_name='user4')
connection.execute(users.insert(), user_id=5,
user_name='user5')
raise Exception('uh oh')
trans2.commit()
except:
trans2.rollback()
raise
transaction.rollback()
except Exception, e:
transaction.rollback()
raise
except Exception, e:
try:
assert str(e) == 'uh oh' # and not "This transaction is
# inactive"
finally:
connection.close()
def test_retains_through_options(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
conn2 = connection.execution_options(dummy=True)
conn2.execute(users.insert(), user_id=2, user_name='user2')
transaction.rollback()
eq_(connection.scalar("select count(*) from query_users"), 0)
finally:
connection.close()
def test_nesting(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans2.commit()
transaction.rollback()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
def test_with_interface(self):
connection = testing.db.connect()
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
try:
connection.execute(users.insert(), user_id=2, user_name='user2.5')
except Exception, e:
trans.__exit__(*sys.exc_info())
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans.__exit__(None, None, None)
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 1)
connection.close()
def test_close(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.commit()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 5)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 5
connection.close()
def test_close2(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.close()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
connection.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
connection.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans3 = connection.begin()
connection.execute(users.insert(), user_id=3, user_name='user3')
trans3.rollback()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
connection.close()
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
transaction.commit()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction.commit()
transaction.close()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.prepare()
transaction.rollback()
transaction.close()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
connection.close()
# PG emergency shutdown:
# select * from pg_prepared_xacts
# ROLLBACK PREPARED '<xid>'
@testing.crashes('mysql', 'Crashing on 5.5, not worth it')
@testing.requires.skip_mysql_on_windows
@testing.requires.two_phase_transactions
@testing.requires.savepoints
def test_mixed_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction2 = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction3 = connection.begin_nested()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction4 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction4.commit()
transaction3.rollback()
connection.execute(users.insert(), user_id=5, user_name='user5')
transaction2.commit()
transaction.prepare()
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (5, )])
connection.close()
@testing.requires.two_phase_transactions
@testing.crashes('mysql+oursql',
'Times out in full test runs only, causing '
'subsequent tests to fail')
@testing.crashes('mysql+zxjdbc',
'Deadlocks, causing subsequent tests to fail')
@testing.fails_on('mysql', 'FIXME: unknown')
def test_two_phase_recover(self):
# MySQL recovery doesn't currently seem to work correctly
# Prepared transactions disappear when connections are closed
# and even when they aren't it doesn't seem possible to use the
# recovery id.
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
connection.close()
connection2 = testing.db.connect()
eq_(connection2.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[])
recoverables = connection2.recover_twophase()
assert transaction.xid in recoverables
connection2.commit_prepared(transaction.xid, recover=True)
eq_(connection2.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, )])
connection2.close()
@testing.requires.two_phase_transactions
def test_multiple_two_phase(self):
conn = testing.db.connect()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name='user1')
xa.prepare()
xa.commit()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=2, user_name='user2')
xa.prepare()
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=3, user_name='user3')
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=4, user_name='user4')
xa.prepare()
xa.commit()
result = \
conn.execute(select([users.c.user_name]).
order_by(users.c.user_id))
eq_(result.fetchall(), [('user1', ), ('user4', )])
conn.close()
class AutoRollbackTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global metadata
metadata = MetaData()
@classmethod
def teardown_class(cls):
metadata.drop_all(testing.db)
def test_rollback_deadlock(self):
"""test that returning connections to the pool clears any object
locks."""
conn1 = testing.db.connect()
conn2 = testing.db.connect()
users = Table('deadlock_users', metadata, Column('user_id',
INT, primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
users.create(conn1)
conn1.execute('select * from deadlock_users')
conn1.close()
# without auto-rollback in the connection pool's return() logic,
# this deadlocks in PostgreSQL, because conn1 is returned to the
# pool but still has a lock on "deadlock_users". comment out the
# rollback in pool/ConnectionFairy._close() to see!
users.drop(conn2)
conn2.close()
class ExplicitAutoCommitTest(fixtures.TestBase):
"""test the 'autocommit' flag on select() and text() objects.
Requires PostgreSQL so that we may define a custom function which
modifies the database. """
__only_on__ = 'postgresql'
@classmethod
def setup_class(cls):
global metadata, foo
metadata = MetaData(testing.db)
foo = Table('foo', metadata, Column('id', Integer,
primary_key=True), Column('data', String(100)))
metadata.create_all()
testing.db.execute("create function insert_foo(varchar) "
"returns integer as 'insert into foo(data) "
"values ($1);select 1;' language sql")
def teardown(self):
foo.delete().execute().close()
@classmethod
def teardown_class(cls):
testing.db.execute('drop function insert_foo(varchar)')
metadata.drop_all()
def test_control(self):
# test that not using autocommit does not commit
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')]))
assert conn2.execute(select([foo.c.data])).fetchall() == []
conn1.execute(text("select insert_foo('moredata')"))
assert conn2.execute(select([foo.c.data])).fetchall() == []
trans = conn1.begin()
trans.commit()
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('moredata', )]
conn1.close()
conn2.close()
def test_explicit_compiled(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1'
)]).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.close()
conn2.close()
def test_explicit_connection(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data1'
)]))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# connection supersedes statement
conn1.execution_options(autocommit=False).\
execute(select([func.insert_foo('data2'
)]).execution_options(autocommit=True))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# ditto
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data3'
)]).execution_options(autocommit=False))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
), ('data2', ), ('data3', )])
conn1.close()
conn2.close()
def test_explicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')"
).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on select\(\) is deprecated',
r'autocommit\(\) is deprecated')
def test_explicit_compiled_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')],
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.execute(select([func.insert_foo('data2')]).autocommit())
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('data2', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on text\(\) is deprecated')
def test_explicit_text_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')",
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
def test_implicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("insert into foo (data) values "
"('implicitdata')"))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('implicitdata', )]
conn1.close()
conn2.close()
tlengine = None
class TLTransactionTest(fixtures.TestBase):
__requires__ = ('ad_hoc_engines', )
@classmethod
def setup_class(cls):
global users, metadata, tlengine
tlengine = testing_engine(options=dict(strategy='threadlocal'))
metadata = MetaData()
users = Table('query_users', metadata, Column('user_id', INT,
Sequence('query_users_id_seq', optional=True),
primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
metadata.create_all(tlengine)
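# Illustrative sketch of the threadlocal strategy under test: every call on
# tlengine within a single thread shares one implicit connection and
# transaction, so e.g.:
#
#   tlengine.begin()
#   tlengine.execute(users.insert(), user_id=99, user_name='demo')
#   tlengine.rollback()   # the insert above is discarded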
def teardown(self):
tlengine.execute(users.delete()).close()
@classmethod
def teardown_class(cls):
tlengine.close()
metadata.drop_all(tlengine)
tlengine.dispose()
def setup(self):
# ensure tests start with engine closed
tlengine.close()
@testing.crashes('oracle', 'TNS error of unknown origin occurs on the buildbot.')
def test_rollback_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.rollback()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.rollback()
def test_commit_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.commit()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.commit()
def test_prepare_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.prepare()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.prepare()
def test_connection_close(self):
"""test that when connections are closed for real, transactions
are rolled back and disposed."""
c = tlengine.contextual_connect()
c.begin()
assert c.in_transaction()
c.close()
assert not c.in_transaction()
def test_transaction_close(self):
c = tlengine.contextual_connect()
t = c.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
t2 = c.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.execute(users.insert(), user_id=4, user_name='user4')
t2.close()
result = c.execute('select * from query_users')
assert len(result.fetchall()) == 4
t.close()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
c.close()
external_connection.close()
def test_rollback(self):
"""test a basic rollback"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
external_connection.close()
def test_commit(self):
"""test a basic commit"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
external_connection.close()
def test_with_interface(self):
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
trans.commit()
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
trans.__exit__(Exception, "fake", None)
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
trans.__exit__(None, None, None)
eq_(
tlengine.execute(users.select().order_by(users.c.user_id)).fetchall(),
[
(1, 'user1'),
(2, 'user2'),
(4, 'user4'),
]
)
def test_commits(self):
connection = tlengine.connect()
assert connection.execute('select count(*) from query_users'
).scalar() == 0
connection.close()
connection = tlengine.contextual_connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute('select * from query_users')
l = result.fetchall()
assert len(l) == 3, 'expected 3 got %d' % len(l)
transaction.commit()
connection.close()
def test_rollback_off_conn(self):
# test that a TLTransaction opened off a TLConnection allows
# that TLConnection to be aware of the transactional context
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
external_connection.close()
def test_morerollback_off_conn(self):
# test that an existing TLConnection automatically takes place
# in a TLTransaction opened on a second TLConnection
conn = tlengine.contextual_connect()
conn2 = tlengine.contextual_connect()
trans = conn2.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
conn2.close()
external_connection.close()
def test_commit_off_connection(self):
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
conn.close()
external_connection.close()
def test_nesting_rollback(self):
"""tests nesting of transactions, rollback at the end"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.rollback()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_nesting_commit(self):
"""tests nesting of transactions, commit at the end."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.commit()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 5)
finally:
external_connection.close()
def test_mixed_nesting(self):
"""tests nesting of transactions off the TLEngine directly
inside of tranasctions off the connection from the TLEngine"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
conn = tlengine.contextual_connect()
trans = conn.begin()
trans2 = conn.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.begin()
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.execute(users.insert(), user_id=6, user_name='user6')
tlengine.execute(users.insert(), user_id=7, user_name='user7')
tlengine.commit()
tlengine.execute(users.insert(), user_id=8, user_name='user8')
tlengine.commit()
trans2.commit()
trans.rollback()
conn.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_more_mixed_nesting(self):
"""tests nesting of transactions off the connection from the
TLEngine inside of tranasctions off thbe TLEngine directly."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
connection = tlengine.contextual_connect()
connection.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans.commit()
tlengine.commit()
tlengine.rollback()
connection.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.rollback()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
tlengine.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
tlengine.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.rollback()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
tlengine.close()
def test_connections(self):
"""tests that contextual_connect is threadlocal"""
c1 = tlengine.contextual_connect()
c2 = tlengine.contextual_connect()
assert c1.connection is c2.connection
c2.close()
assert not c1.closed
assert not tlengine.closed
@testing.requires.independent_cursors
def test_result_closing(self):
"""tests that contextual_connect is threadlocal"""
r1 = tlengine.execute(select([1]))
r2 = tlengine.execute(select([1]))
row1 = r1.fetchone()
row2 = r2.fetchone()
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
# close again, nothing happens since resultproxy calls close()
# only once
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
r2.close()
assert r2.connection.closed
assert tlengine.closed
@testing.crashes('oracle+cx_oracle', 'intermittent failures on the buildbot')
def test_dispose(self):
eng = testing_engine(options=dict(strategy='threadlocal'))
result = eng.execute(select([1]))
eng.dispose()
eng.execute(select([1]))
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.prepare()
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.prepare()
tlengine.rollback()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
counters = None
class ForUpdateTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
@classmethod
def setup_class(cls):
global counters, metadata
metadata = MetaData()
counters = Table('forupdate_counters', metadata,
Column('counter_id', INT, primary_key=True),
Column('counter_value', INT),
test_needs_acid=True)
counters.create(testing.db)
def teardown(self):
testing.db.execute(counters.delete()).close()
@classmethod
def teardown_class(cls):
counters.drop(testing.db)
def increment(
self,
count,
errors,
update_style=True,
delay=0.005,
):
con = testing.db.connect()
sel = counters.select(for_update=update_style,
whereclause=counters.c.counter_id == 1)
for i in xrange(count):
trans = con.begin()
try:
existing = con.execute(sel).first()
incr = existing['counter_value'] + 1
time.sleep(delay)
con.execute(counters.update(counters.c.counter_id == 1,
values={'counter_value': incr}))
time.sleep(delay)
readback = con.execute(sel).first()
if readback['counter_value'] != incr:
raise AssertionError('Got %s post-update, expected '
'%s' % (readback['counter_value'], incr))
trans.commit()
except Exception, e:
trans.rollback()
errors.append(e)
break
con.close()
@testing.crashes('mssql', 'FIXME: unknown')
@testing.crashes('firebird', 'FIXME: unknown')
@testing.crashes('sybase', 'FIXME: unknown')
@testing.crashes('access', 'FIXME: unknown')
@testing.requires.independent_connections
def test_queued_update(self):
"""Test SELECT FOR UPDATE with concurrent modifications.
Runs concurrent modifications on a single row in the users
table, with each mutator trying to increment a value stored in
user_name.
"""
db = testing.db
db.execute(counters.insert(), counter_id=1, counter_value=0)
iterations, thread_count = 10, 5
threads, errors = [], []
for i in xrange(thread_count):
thrd = threading.Thread(target=self.increment,
args=(iterations, ),
kwargs={'errors': errors,
'update_style': True})
thrd.start()
threads.append(thrd)
for thrd in threads:
thrd.join()
for e in errors:
sys.stdout.write('Failure: %s\n' % e)
self.assert_(len(errors) == 0)
sel = counters.select(whereclause=counters.c.counter_id == 1)
final = db.execute(sel).first()
self.assert_(final['counter_value'] == iterations
* thread_count)
def overlap(
self,
ids,
errors,
update_style,
):
sel = counters.select(for_update=update_style,
whereclause=counters.c.counter_id.in_(ids))
con = testing.db.connect()
trans = con.begin()
try:
rows = con.execute(sel).fetchall()
time.sleep(0.25)
trans.commit()
except Exception, e:
trans.rollback()
errors.append(e)
con.close()
def _threaded_overlap(
self,
thread_count,
groups,
update_style=True,
pool=5,
):
db = testing.db
for cid in range(pool - 1):
db.execute(counters.insert(), counter_id=cid + 1,
counter_value=0)
errors, threads = [], []
for i in xrange(thread_count):
thrd = threading.Thread(target=self.overlap,
args=(groups.pop(0), errors,
update_style))
thrd.start()
threads.append(thrd)
for thrd in threads:
thrd.join()
return errors
@testing.crashes('mssql', 'FIXME: unknown')
@testing.crashes('firebird', 'FIXME: unknown')
@testing.crashes('sybase', 'FIXME: unknown')
@testing.crashes('access', 'FIXME: unknown')
@testing.requires.independent_connections
def test_queued_select(self):
"""Simple SELECT FOR UPDATE conflict test"""
errors = self._threaded_overlap(2, [(1, 2, 3), (3, 4, 5)])
for e in errors:
sys.stderr.write('Failure: %s\n' % e)
self.assert_(len(errors) == 0)
@testing.crashes('mssql', 'FIXME: unknown')
@testing.fails_on('mysql', 'No support for NOWAIT')
@testing.crashes('firebird', 'FIXME: unknown')
@testing.crashes('sybase', 'FIXME: unknown')
@testing.crashes('access', 'FIXME: unknown')
@testing.requires.independent_connections
def test_nowait_select(self):
"""Simple SELECT FOR UPDATE NOWAIT conflict test"""
errors = self._threaded_overlap(2, [(1, 2, 3), (3, 4, 5)],
update_style='nowait')
self.assert_(len(errors) != 0)
class IsolationLevelTest(fixtures.TestBase):
__requires__ = ('isolation_level', 'ad_hoc_engines')
def _default_isolation_level(self):
if testing.against('sqlite'):
return 'SERIALIZABLE'
elif testing.against('postgresql'):
return 'READ COMMITTED'
elif testing.against('mysql'):
return "REPEATABLE READ"
else:
assert False, "default isolation level not known"
def _non_default_isolation_level(self):
if testing.against('sqlite'):
return 'READ UNCOMMITTED'
elif testing.against('postgresql'):
return 'SERIALIZABLE'
elif testing.against('mysql'):
return "SERIALIZABLE"
else:
assert False, "non default isolation level not known"
def test_engine_param_stays(self):
eng = testing_engine()
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
level = self._non_default_isolation_level()
ne_(isolation_level, level)
eng = testing_engine(options=dict(isolation_level=level))
eq_(
eng.dialect.get_isolation_level(
eng.connect().connection),
level
)
# check that it stays
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
def test_default_level(self):
eng = testing_engine(options=dict())
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
eq_(isolation_level, self._default_isolation_level())
def test_reset_level(self):
eng = testing_engine(options=dict())
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
eng.dialect.set_isolation_level(
conn.connection, self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level()
)
eng.dialect.reset_isolation_level(conn.connection)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
conn.close()
def test_reset_level_with_setting(self):
eng = testing_engine(options=dict(
isolation_level=
self._non_default_isolation_level()))
conn = eng.connect()
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
eng.dialect.set_isolation_level(conn.connection,
self._default_isolation_level())
eq_(eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level())
eng.dialect.reset_isolation_level(conn.connection)
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
conn.close()
def test_invalid_level(self):
eng = testing_engine(options=dict(isolation_level='FOO'))
assert_raises_message(
exc.ArgumentError,
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
("FOO", eng.dialect.name,
", ".join(eng.dialect._isolation_lookup)),
eng.connect)
def test_per_connection(self):
from sqlalchemy.pool import QueuePool
eng = testing_engine(options=dict(
poolclass=QueuePool,
pool_size=2, max_overflow=0))
c1 = eng.connect()
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c2.connection),
self._default_isolation_level()
)
c1.close()
c2.close()
c3 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c3.connection),
self._default_isolation_level()
)
c4 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c4.connection),
self._default_isolation_level()
)
c3.close()
c4.close()
def test_per_statement_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
select([1]).execution_options,
isolation_level=self._non_default_isolation_level()
)
def test_per_engine_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may "
r"only be specified on Connection.execution_options\(\). "
r"To set engine-wide isolation level, "
r"use the isolation_level argument to create_engine\(\).",
create_engine,
testing.db.url,
execution_options={'isolation_level':
self._non_default_isolation_level}
)
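# A minimal, self-contained sketch (not part of the original suite) of the
# isolation-level APIs exercised above; the database URL is a placeholder
# assumption.
def _isolation_level_usage_sketch():
    from sqlalchemy import create_engine
    eng = create_engine('postgresql://scott:tiger@localhost/test',
                        isolation_level='SERIALIZABLE')  # engine-wide default
    conn = eng.connect()
    # per-connection override, as test_per_connection demonstrates above
    conn = conn.execution_options(isolation_level='READ COMMITTED')
    conn.close()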
|
|
import asyncio
import http.cookies
import io
import json
import mimetypes
import os
import sys
import traceback
import urllib.parse
import warnings
import chardet
from aio2py.required import aiohttp
from aio2py.required.aiohttp import hdrs, helpers, streams
from .log import client_logger
from .streams import EOF_MARKER, FlowControlStreamReader
from .multidict import CIMultiDictProxy, MultiDictProxy, MultiDict, CIMultiDict
from aio2py.required.aiohttp.multipart import MultipartWriter
PY_341 = sys.version_info >= (3, 4, 1)
HTTP_PORT = 80
HTTPS_PORT = 443
class ClientRequest:
GET_METHODS = {hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union(
{hdrs.METH_DELETE, hdrs.METH_TRACE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: '*/*',
hdrs.ACCEPT_ENCODING: 'gzip, deflate',
}
body = b''
auth = None
response = None
response_class = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
    # N.B.
    # Adding a __del__ method that closes self._writer doesn't make sense
    # because _writer is an instance method and thus keeps a reference to
    # self; the finalizer will not be called until the writer has finished.
def __init__(self, method, url, *,
params=None, headers=None, data=None, cookies=None,
files=None, auth=None, encoding='utf-8',
version=aiohttp.HttpVersion11, compress=None,
chunked=None, expect100=False,
loop=None, response_class=None):
if loop is None:
loop = asyncio.get_event_loop()
self.url = url
self.method = method.upper()
self.encoding = encoding
self.chunked = chunked
self.compress = compress
self.loop = loop
self.response_class = response_class or ClientResponse
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self.update_version(version)
self.update_host(url)
self.update_path(params)
self.update_headers(headers)
self.update_cookies(cookies)
self.update_content_encoding()
self.update_auth(auth)
if files:
warnings.warn(
'files parameter is deprecated. use data instead',
DeprecationWarning)
if data:
raise ValueError(
'data and files parameters are '
'not supported at the same time.')
data = files
self.update_body_from_data(data)
self.update_transfer_encoding()
self.update_expect_continue(expect100)
def update_host(self, url):
"""Update destination host, port and connection type (ssl)."""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
if not netloc:
raise ValueError('Host could not be detected.')
# check domain idna encoding
try:
netloc = netloc.encode('idna').decode('utf-8')
except UnicodeError:
raise ValueError('URL has an invalid label.')
# basic auth info
if '@' in netloc:
authinfo, netloc = netloc.split('@', 1)
self.auth = helpers.BasicAuth(*authinfo.split(':', 1))
# Record entire netloc for usage in host header
self.netloc = netloc
# extract host and port
self.ssl = scheme == 'https'
if ':' in netloc:
netloc, port_s = netloc.split(':', 1)
try:
self.port = int(port_s)
except ValueError:
raise ValueError(
'Port number could not be converted.') from None
else:
if self.ssl:
self.port = HTTPS_PORT
else:
self.port = HTTP_PORT
self.scheme = scheme
self.host = netloc
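    # Illustrative example (not from the original source): for the URL
    # 'https://user:pw@example.com:8443/path' this leaves self.ssl=True,
    # self.auth=BasicAuth('user', 'pw'), self.netloc='example.com:8443',
    # self.host='example.com' and self.port=8443.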
def update_version(self, version):
"""Convert request version to two elements tuple.
parser http version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [l.strip() for l in version.split('.', 1)]
try:
version = int(v[0]), int(v[1])
except ValueError:
raise ValueError(
'Can not parse http version number: {}'
.format(version)) from None
self.version = version
def update_path(self, params):
"""Build path."""
# extract path
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(self.url)
if not path:
path = '/'
if isinstance(params, dict):
params = list(params.items())
elif isinstance(params, (MultiDictProxy, MultiDict)):
params = list(params.items())
if params:
params = urllib.parse.urlencode(params)
if query:
query = '%s&%s' % (query, params)
else:
query = params
self.path = urllib.parse.urlunsplit(
('', '', urllib.parse.quote(path, safe='/%:'), query, fragment))
self.url = urllib.parse.urlunsplit(
(scheme, netloc, self.path, '', ''))
def update_headers(self, headers):
"""Update request headers."""
self.headers = CIMultiDict()
if headers:
if isinstance(headers, dict):
headers = headers.items()
elif isinstance(headers, (MultiDictProxy, MultiDict)):
headers = headers.items()
for key, value in headers:
self.headers.add(key, value)
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in self.headers:
self.headers[hdr] = val
# add host
if hdrs.HOST not in self.headers:
self.headers[hdrs.HOST] = self.netloc
def update_cookies(self, cookies):
"""Update request cookies header."""
if not cookies:
return
c = http.cookies.SimpleCookie()
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ''))
del self.headers[hdrs.COOKIE]
if isinstance(cookies, dict):
cookies = cookies.items()
for name, value in cookies:
if isinstance(value, http.cookies.Morsel):
# use dict method because SimpleCookie class modifies value
dict.__setitem__(c, name, value)
else:
c[name] = value
self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
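    # e.g. (illustrative) update_cookies({'session': 'abc'}) merges the value
    # with any pre-existing Cookie header and re-emits it as 'session=abc'.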
def update_content_encoding(self):
"""Set request content encoding."""
enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
if enc:
self.compress = enc
self.chunked = True # enable chunked, no need to deal with length
elif self.compress:
if not isinstance(self.compress, str):
self.compress = 'deflate'
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def update_auth(self, auth):
"""Set basic auth."""
if auth is None:
auth = self.auth
if auth is None:
return
if not isinstance(auth, helpers.BasicAuth):
warnings.warn(
                'BasicAuth() tuple is required instead', DeprecationWarning)
auth = helpers.BasicAuth(*auth)
self.headers[hdrs.AUTHORIZATION] = auth.encode()
def update_body_from_data(self, data):
if not data:
return
if isinstance(data, str):
data = data.encode(self.encoding)
if isinstance(data, (bytes, bytearray)):
self.body = data
if hdrs.CONTENT_TYPE not in self.headers:
self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
elif isinstance(data, (asyncio.StreamReader, streams.DataQueue)):
self.body = data
elif asyncio.iscoroutine(data):
self.body = data
if (hdrs.CONTENT_LENGTH not in self.headers and
self.chunked is None):
self.chunked = True
elif isinstance(data, io.IOBase):
assert not isinstance(data, io.StringIO), \
'attempt to send text data instead of binary'
self.body = data
if not self.chunked and isinstance(data, io.BufferedReader):
# Not chunking if content-length can be determined
size = os.fstat(data.fileno()).st_size - data.tell()
self.headers[hdrs.CONTENT_LENGTH] = str(size)
self.chunked = False
else:
self.chunked = True
if hasattr(data, 'mode'):
if data.mode == 'r':
raise ValueError('file {!r} should be open in binary mode'
''.format(data))
if (hdrs.CONTENT_TYPE not in self.headers and
hasattr(data, 'name')):
mime = mimetypes.guess_type(data.name)[0]
mime = 'application/octet-stream' if mime is None else mime
self.headers[hdrs.CONTENT_TYPE] = mime
elif isinstance(data, MultipartWriter):
self.body = data.serialize()
self.headers.update(data.headers)
self.chunked = self.chunked or 8192
else:
if not isinstance(data, helpers.FormData):
data = helpers.FormData(data)
self.body = data(self.encoding)
if hdrs.CONTENT_TYPE not in self.headers:
self.headers[hdrs.CONTENT_TYPE] = data.content_type
if data.is_multipart:
self.chunked = self.chunked or 8192
else:
if (hdrs.CONTENT_LENGTH not in self.headers and
not self.chunked):
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_transfer_encoding(self):
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
if self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
del self.headers[hdrs.CONTENT_LENGTH]
if 'chunked' not in te:
self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
self.chunked = self.chunked if type(self.chunked) is int else 8192
else:
if 'chunked' in te:
self.chunked = 8192
else:
self.chunked = None
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
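    # Summary (illustrative): chunked transfer, whether requested explicitly
    # or implied by a Transfer-Encoding header, wins over Content-Length (the
    # header is dropped and a chunk size, 8192 by default, is used); otherwise
    # a Content-Length is computed from the body if the caller supplied none.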
def update_expect_continue(self, expect=False):
if expect:
self.headers[hdrs.EXPECT] = '100-continue'
elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
expect = True
if expect:
self._continue = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def write_bytes(self, request, reader):
"""Support coroutines that yields bytes objects."""
# 100 response
if self._continue is not None:
yield from self._continue
try:
if asyncio.iscoroutine(self.body):
exc = None
value = None
stream = self.body
while True:
try:
if exc is not None:
result = stream.throw(exc)
else:
result = stream.send(value)
except StopIteration as exc:
if isinstance(exc.value, bytes):
yield from request.write(exc.value, drain=True)
break
except:
self.response.close(True)
raise
if isinstance(result, asyncio.Future):
exc = None
value = None
try:
value = yield result
except Exception as err:
exc = err
elif isinstance(result, (bytes, bytearray)):
yield from request.write(result, drain=True)
value = None
else:
raise ValueError(
'Bytes object is expected, got: %s.' %
type(result))
elif isinstance(self.body, asyncio.StreamReader):
chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
while chunk:
yield from request.write(chunk, drain=True)
chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
elif isinstance(self.body, streams.DataQueue):
while True:
try:
chunk = yield from self.body.read()
if chunk is EOF_MARKER:
break
yield from request.write(chunk, drain=True)
except streams.EofStream:
break
elif isinstance(self.body, io.IOBase):
chunk = self.body.read(self.chunked)
while chunk:
request.write(chunk)
chunk = self.body.read(self.chunked)
else:
if isinstance(self.body, (bytes, bytearray)):
self.body = (self.body,)
for chunk in self.body:
request.write(chunk)
except Exception as exc:
new_exc = aiohttp.ClientRequestError(
'Can not write request body for %s' % self.url)
new_exc.__context__ = exc
new_exc.__cause__ = exc
reader.set_exception(new_exc)
else:
try:
ret = request.write_eof()
# NB: in asyncio 3.4.1+ StreamWriter.drain() is coroutine
# see bug #170
if (asyncio.iscoroutine(ret) or
isinstance(ret, asyncio.Future)):
yield from ret
except Exception as exc:
new_exc = aiohttp.ClientRequestError(
'Can not write request body for %s' % self.url)
new_exc.__context__ = exc
new_exc.__cause__ = exc
reader.set_exception(new_exc)
self._writer = None
def send(self, writer, reader):
request = aiohttp.Request(writer, self.method, self.path, self.version)
if self.compress:
request.add_compression_filter(self.compress)
if self.chunked is not None:
request.enable_chunked_encoding()
request.add_chunking_filter(self.chunked)
# set default content-type
if (self.method in self.POST_METHODS and
hdrs.CONTENT_TYPE not in self.headers):
self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
request.add_headers(
*((k, v)
for k, v in ((k, value)
for k, value in self.headers.items())))
request.send_headers()
self._writer = asyncio.async(
self.write_bytes(request, reader), loop=self.loop)
self.response = self.response_class(
self.method, self.url, self.host,
writer=self._writer, continue100=self._continue)
self.response._post_init(self.loop)
return self.response
@asyncio.coroutine
def close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
def terminate(self):
if self._writer is not None:
if hasattr(self.loop, 'is_closed'):
if not self.loop.is_closed():
self._writer.cancel()
else:
self._writer.cancel()
self._writer = None
class ClientResponse:
message = None # RawResponseMessage object
# from the Status-Line of the response
version = None # HTTP-Version
status = None # Status-Code
reason = None # Reason-Phrase
cookies = None # Response cookies (Set-Cookie)
content = None # Payload stream
_connection = None # current connection
flow_control_class = FlowControlStreamReader # reader flow control
_reader = None # input stream
_response_parser = aiohttp.HttpResponseParser()
_source_traceback = None
    # set up by ClientRequest after the ClientResponse object is created;
    # a post-init stage avoids having to change the ctor signature
_loop = None
    _closed = True  # allow __del__ for a response that was never properly initialized
def __init__(self, method, url, host='', *, writer=None, continue100=None):
super().__init__()
self.method = method
self.url = url
self.host = host
self.headers = None
self._content = None
self._writer = writer
self._continue = continue100
self._closed = False
def _post_init(self, loop):
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
if PY_341:
def __del__(self):
if self._closed:
return
self.close()
warnings.warn("Unclosed response {!r}".format(self),
ResourceWarning)
context = {'client_response': self,
'message': 'Unclosed response'}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def __repr__(self):
out = io.StringIO()
print('<ClientResponse({}) [{} {}]>'.format(
self.url, self.status, self.reason), file=out)
print(self.headers, file=out)
return out.getvalue()
@property
def connection(self):
return self._connection
def waiting_for_continue(self):
return self._continue is not None
def _setup_connection(self, connection):
self._reader = connection.reader
self._connection = connection
self.content = self.flow_control_class(
connection.reader, loop=connection.loop)
@asyncio.coroutine
def start(self, connection, read_until_eof=False):
"""Start response processing."""
self._setup_connection(connection)
while True:
httpstream = self._reader.set_parser(self._response_parser)
# read response
self.message = yield from httpstream.read()
if self.message.code != 100:
break
if self._continue is not None and not self._continue.done():
self._continue.set_result(True)
self._continue = None
# response status
self.version = self.message.version
self.status = self.message.code
self.reason = self.message.reason
# headers
self.headers = CIMultiDictProxy(self.message.headers)
# payload
response_with_body = self.method.lower() != 'head'
self._reader.set_parser(
aiohttp.HttpPayloadParser(self.message,
readall=read_until_eof,
response_with_body=response_with_body),
self.content)
# cookies
self.cookies = http.cookies.SimpleCookie()
if hdrs.SET_COOKIE in self.headers:
for hdr in self.headers.getall(hdrs.SET_COOKIE):
try:
self.cookies.load(hdr)
except http.cookies.CookieError as exc:
client_logger.warning(
'Can not load response cookies: %s', exc)
return self
def close(self, force=False):
if self._closed:
return
self._closed = True
if hasattr(self._loop, 'is_closed'):
if self._loop.is_closed():
return
if self._connection is not None:
if self.content and not self.content.at_eof():
force = True
if force:
self._connection.close()
else:
self._connection.release()
if self._reader is not None:
self._reader.unset_parser()
self._connection = None
if self._writer is not None and not self._writer.done():
self._writer.cancel()
self._writer = None
@asyncio.coroutine
def release(self):
try:
chunk = yield from self.content.readany()
while chunk is not EOF_MARKER or chunk:
chunk = yield from self.content.readany()
finally:
self.close()
@asyncio.coroutine
def wait_for_close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
self.close()
@asyncio.coroutine
def read(self, decode=False):
"""Read response payload."""
if self._content is None:
try:
self._content = yield from self.content.read()
except:
self.close(True)
raise
else:
self.close()
data = self._content
if decode:
warnings.warn(
'.read(True) is deprecated. use .json() instead',
DeprecationWarning)
return (yield from self.json())
return data
@asyncio.coroutine
def read_and_close(self, decode=False):
"""Read response payload and then close response."""
warnings.warn(
'read_and_close is deprecated, use .read() instead',
DeprecationWarning)
return (yield from self.read(decode))
def _get_encoding(self):
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
mtype, stype, _, params = helpers.parse_mimetype(ctype)
encoding = params.get('charset')
if not encoding:
encoding = chardet.detect(self._content)['encoding']
if not encoding:
encoding = 'utf-8'
return encoding
@asyncio.coroutine
def text(self, encoding=None):
"""Read response payload and decode."""
if self._content is None:
yield from self.read()
if encoding is None:
encoding = self._get_encoding()
return self._content.decode(encoding)
@asyncio.coroutine
def json(self, *, encoding=None, loads=json.loads):
"""Read and decodes JSON response."""
if self._content is None:
yield from self.read()
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
if 'json' not in ctype:
client_logger.warning(
'Attempt to decode JSON with unexpected mimetype: %s', ctype)
if not self._content.strip():
return None
if encoding is None:
encoding = self._get_encoding()
return loads(self._content.decode(encoding))
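# A minimal standalone sketch (not part of the module above) of the charset
# negotiation that ClientResponse._get_encoding performs: prefer the charset
# parameter of the Content-Type header, fall back to chardet detection, then
# to utf-8. It reuses the module-level chardet import.
def _guess_encoding_sketch(content_type_header, body_bytes):
    charset = None
    for param in content_type_header.split(';')[1:]:
        name, _, value = param.strip().partition('=')
        if name.lower() == 'charset':
            charset = value.strip('"\'') or None
    if not charset:
        charset = chardet.detect(body_bytes)['encoding']
    return charset or 'utf-8'
# e.g. _guess_encoding_sketch('text/html; charset=ISO-8859-1', b'') returns
# 'ISO-8859-1'.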
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2019_06_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
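# A hedged usage sketch (not generated code) of driving the long-running
# operations above through the async management client; the credential and
# resource names are placeholder assumptions.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def delete_rule():
#         async with NetworkManagementClient(
#                 DefaultAzureCredential(), "<subscription-id>") as client:
#             poller = await client.inbound_nat_rules.begin_delete(
#                 "my-rg", "my-lb", "my-nat-rule")
#             await poller.result()  # waits for the LRO to finish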
|
|
"""The tests for the MQTT JSON light platform.
Configuration with RGB, brightness, color temp, effect, white value and XY:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
white_value: true
xy: true
Configuration with RGB, brightness, color temp, effect, white value:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
white_value: true
Configuration with RGB, brightness, color temp and effect:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
color_temp: true
effect: true
rgb: true
Configuration with RGB, brightness and color temp:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
rgb: true
color_temp: true
Configuration with RGB, brightness:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
rgb: true
Config without RGB:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
brightness: true
Config without RGB and brightness:
light:
platform: mqtt_json
name: mqtt_json_light_1
state_topic: "home/rgb1"
command_topic: "home/rgb1/set"
Config with brightness and scale:
light:
platform: mqtt_json
name: test
state_topic: "mqtt_json_light_1"
command_topic: "mqtt_json_light_1/set"
brightness: true
brightness_scale: 99
"""
import json
from unittest import mock
from unittest.mock import ANY, patch
from homeassistant.components import light, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_coro,
mock_registry,
)
from tests.components.light import common
class JsonValidator(object):
"""Helper to compare JSON."""
def __init__(self, jsondata):
"""Initialize JSON validator."""
self.jsondata = jsondata
def __eq__(self, other):
"""Compare JSON data."""
return json.loads(self.jsondata) == json.loads(other)
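# Illustrative note: because comparison goes through json.loads on both sides,
# key order and whitespace differences never cause false failures, e.g.
# JsonValidator('{"state": "ON", "brightness": 50}') compares equal to
# '{"brightness":50,"state":"ON"}', which is why the publish assertions below
# can hard-code a single serialization of each expected payload.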
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
"""Test if setup fails with no command topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{light.DOMAIN: {"platform": "mqtt", "schema": "json", "name": "test"}},
)
assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_white_val_if_no_topics(hass, mqtt_mock):
"""Test for no RGB, brightness, color temp, effect, white val or XY."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling of the state via topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"brightness": True,
"color_temp": True,
"effect": True,
"rgb": True,
"white_value": True,
"xy": True,
"hs": True,
"qos": "0",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert state.attributes.get("hs_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Turn on the light, full white
async_fire_mqtt_message(
hass,
"test_light_rgb",
'{"state":"ON",'
'"color":{"r":255,"g":255,"b":255},'
'"brightness":255,'
'"color_temp":155,'
'"effect":"colorloop",'
'"white_value":150}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 155
assert state.attributes.get("effect") == "colorloop"
assert state.attributes.get("white_value") == 150
assert state.attributes.get("xy_color") == (0.323, 0.329)
assert state.attributes.get("hs_color") == (0.0, 0.0)
# Turn the light off
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "brightness":100}')
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", ' '"color":{"r":125,"g":125,"b":125}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "color":{"x":0.135,"y":0.135}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.141, 0.14)
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "color":{"h":180,"s":50}}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (180.0, 50.0)
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "color_temp":155}')
light_state = hass.states.get("light.test")
assert light_state.attributes.get("color_temp") == 155
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON", "effect":"colorloop"}'
)
light_state = hass.states.get("light.test")
assert light_state.attributes.get("effect") == "colorloop"
async_fire_mqtt_message(hass, "test_light_rgb", '{"state":"ON", "white_value":155}')
light_state = hass.states.get("light.test")
assert light_state.attributes.get("white_value") == 155
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending of command in optimistic mode."""
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
"white_value": 50,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=mock_coro(fake_state),
):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness": True,
"color_temp": True,
"effect": True,
"hs": True,
"rgb": True,
"xy": True,
"white_value": True,
"qos": 2,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 95
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") == 100
assert state.attributes.get("white_value") == 50
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 191
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", '{"state": "ON"}', 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", '{"state": "OFF"}', 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
mqtt_mock.reset_mock()
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_has_calls(
[
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 0, "g": 123, "b": 255,'
' "x": 0.14, "y": 0.131, "h": 210.824, "s": 100.0},'
' "brightness": 50}'
),
2,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 255, "g": 56, "b": 59,'
' "x": 0.654, "y": 0.301, "h": 359.0, "s": 78.0},'
' "brightness": 50}'
),
2,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 255, "g": 128, "b": 0,'
' "x": 0.611, "y": 0.375, "h": 30.118, "s": 100.0},'
' "white_value": 80}'
),
2,
False,
),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["rgb_color"] == (255, 128, 0)
assert state.attributes["brightness"] == 50
assert state.attributes["hs_color"] == (30.118, 100)
assert state.attributes["white_value"] == 80
assert state.attributes["xy_color"] == (0.611, 0.375)
async def test_sending_hs_color(hass, mqtt_mock):
"""Test light.turn_on with hs color sends hs color parameters."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness": True,
"hs": True,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
mqtt_mock.reset_mock()
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_has_calls(
[
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"h": 210.824, "s": 100.0},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"h": 359.0, "s": 78.0},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"h": 30.118, "s": 100.0},'
' "white_value": 80}'
),
0,
False,
),
],
any_order=True,
)
async def test_sending_rgb_color_no_brightness(hass, mqtt_mock):
"""Test light.turn_on with hs color sends rgb color parameters."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"rgb": True,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], brightness=255
)
mqtt_mock.async_publish.assert_has_calls(
[
mock.call(
"test_light_rgb/set",
JsonValidator('{"state": "ON", "color": {"r": 0, "g": 24, "b": 50}}'),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator('{"state": "ON", "color": {"r": 50, "g": 11, "b": 11}}'),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator('{"state": "ON", "color": {"r": 255, "g": 128, "b": 0}}'),
0,
False,
),
],
any_order=True,
)
async def test_sending_rgb_color_with_brightness(hass, mqtt_mock):
"""Test light.turn_on with hs color sends rgb color parameters."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness": True,
"rgb": True,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_has_calls(
[
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 0, "g": 123, "b": 255},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 255, "g": 56, "b": 59},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"r": 255, "g": 128, "b": 0},'
' "white_value": 80}'
),
0,
False,
),
],
any_order=True,
)
async def test_sending_xy_color(hass, mqtt_mock):
"""Test light.turn_on with hs color sends xy color parameters."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness": True,
"xy": True,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_has_calls(
[
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"x": 0.14, "y": 0.131},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"x": 0.654, "y": 0.301},'
' "brightness": 50}'
),
0,
False,
),
mock.call(
"test_light_rgb/set",
JsonValidator(
'{"state": "ON", "color": {"x": 0.611, "y": 0.375},'
' "white_value": 80}'
),
0,
False,
),
],
any_order=True,
)
async def test_flash_short_and_long(hass, mqtt_mock):
"""Test for flash length being sent when included."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"flash_time_short": 5,
"flash_time_long": 15,
"qos": 0,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test", flash="short")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", JsonValidator('{"state": "ON", "flash": 5}'), 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_on(hass, "light.test", flash="long")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", JsonValidator('{"state": "ON", "flash": 15}'), 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
async def test_transition(hass, mqtt_mock):
"""Test for transition time being sent when included."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test_light_rgb/set",
"qos": 0,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test", transition=15)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set",
JsonValidator('{"state": "ON", "transition": 15}'),
0,
False,
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test", transition=30)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set",
JsonValidator('{"state": "OFF", "transition": 30}'),
0,
False,
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_brightness_scale(hass, mqtt_mock):
"""Test for brightness scaling."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_bright_scale",
"command_topic": "test_light_bright_scale/set",
"brightness": True,
"brightness_scale": 99,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Turn on the light
async_fire_mqtt_message(hass, "test_light_bright_scale", '{"state":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
# Turn on the light with brightness
async_fire_mqtt_message(
hass, "test_light_bright_scale", '{"state":"ON", "brightness": 99}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
async def test_invalid_color_brightness_and_white_values(hass, mqtt_mock):
"""Test that invalid color/brightness/white values are ignored."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"brightness": True,
"rgb": True,
"white_value": True,
"qos": "0",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 185
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Turn on the light
async_fire_mqtt_message(
hass,
"test_light_rgb",
'{"state":"ON",'
'"color":{"r":255,"g":255,"b":255},'
'"brightness": 255,'
'"white_value": 255}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("white_value") == 255
# Bad color values
async_fire_mqtt_message(
hass,
"test_light_rgb",
'{"state":"ON",' '"color":{"r":"bad","g":"val","b":"test"}}',
)
# Color should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
# Bad brightness values
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON",' '"brightness": "badValue"}'
)
# Brightness should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
# Bad white value
async_fire_mqtt_message(
hass, "test_light_rgb", '{"state":"ON",' '"white_value": "badValue"}'
)
# White value should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 255
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"availability_topic": "availability-topic",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get("light.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"availability_topic": "availability-topic",
"payload_available": "good",
"payload_not_available": "nogood",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get("light.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("light.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("light.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "json",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("light.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "schema": "json",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "schema": "json",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
"""Test unique id option only creates one light per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"schema": "json",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"schema": "json",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids(light.DOMAIN)) == 1
async def test_discovery_removal(hass, mqtt_mock, caplog):
"""Test removal of discovered mqtt_json lights."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {"mqtt": {}}, entry)
data = '{ "name": "Beer",' ' "schema": "json",' ' "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
async def test_discovery_deprecated(hass, mqtt_mock, caplog):
"""Test discovery of mqtt_json light with deprecated platform option."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {"mqtt": {}}, entry)
data = (
'{ "name": "Beer",'
' "platform": "mqtt_json",'
' "command_topic": "test_topic"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async def test_discovery_update_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "schema": "json",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
data2 = (
'{ "name": "Milk",'
' "schema": "json",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("light.milk")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "schema": "json",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("light.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("light.beer")
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"schema": "json",
"state_topic": "test-topic",
"command_topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"schema": "json",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"schema": "json",
"state_topic": "test-topic",
"command_topic": "command-topic",
"availability_topic": "avty-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = hass.states.get("light.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("light.beer", new_entity_id="light.milk")
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
state = hass.states.get("light.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
|
|
# coding: utf-8
"""
jinja2schema.visitors.expr
~~~~~~~~~~~~~~~~~~~~~~~~~~
Expression is an instance of :class:`jinja2.nodes.Expr`.
Expression visitors return a tuple which contains expression type and expression structure.
"""
import functools
from jinja2 import nodes
from ..model import Scalar, Dictionary, List, Unknown, Tuple, String, Number, Boolean
from ..mergers import merge_rtypes, merge, merge_many, merge_bool_expr_structs
from ..exceptions import InvalidExpression, UnexpectedExpression, MergeException
from ..config import default_config
from .. import _compat
from .util import visit_many
class Context(object):
"""
Context is used when parsing expressions.
Suppose there is an expression::
{{ data.field.subfield }}
It has the following AST::
Getattr(
node=Getattr(
                node=Name(name='data'),
attr='field'
),
attr='subfield'
)
:func:`visit_getattr` returns a pair that looks like this::
(
# return type:
Scalar(...),
# structure:
{
                'data': {
'field': {
'subfield': Scalar(...)
}
}
}
        )
The return type is defined by the outermost :class:`nodes.Getattr` node, which
in this case is being printed.
    The structure is built during AST traversal from outer to inner nodes, and it is
    kind of "reversed" in relation to the AST.
:class:`Context` is intended for:
* capturing a return type and passing it to the innermost expression node;
* passing a structure "under construction" to the visitors of nested nodes.
Let's look through an example.
Suppose :func:`visit_getattr` is called with the following arguments::
        ast = Getattr(node=Getattr(node=Name(name='data'), attr='field'), attr='subfield')
context = Context(return_struct_cls=Scalar, predicted_struct=Scalar())
    It looks at the outermost AST node and, based on its type (which is :class:`nodes.Getattr`)
    and its ``attr`` field (which equals ``"subfield"``), infers that the variable described by the
    nested AST node must be a dictionary with a ``"subfield"`` key.
It calls a visitor for inner node and :func:`visit_getattr` gets called again, but
with different arguments::
ast = Getattr(node=Name(name='data', ctx='load'), attr='field')
        ctx = Context(return_struct_cls=Scalar, predicted_struct=Dictionary({'subfield': Scalar()}))
    :func:`visit_getattr` applies the same logic again. The inner node is a :class:`nodes.Name`, so
it calls :func:`visit_name` with the following arguments::
ast = Name(name='data')
ctx = Context(
return_struct_cls=Scalar,
predicted_struct=Dictionary({
                'field': Dictionary({'subfield': Scalar()})
})
)
    :func:`visit_name` does not do much by itself. Based on the context it knows what
    structure and what type the variable described by a given :class:`nodes.Name` node must
    have, so it just returns a pair::
        (instance of context.return_struct_cls, Dictionary({'data': context.predicted_struct}))
"""
def __init__(self, ctx=None, return_struct_cls=None, predicted_struct=None):
self.predicted_struct = None
self.return_struct_cls = Unknown
if ctx:
self.predicted_struct = ctx.predicted_struct
self.return_struct_cls = ctx.return_struct_cls
if predicted_struct:
self.predicted_struct = predicted_struct
if return_struct_cls:
self.return_struct_cls = return_struct_cls
def get_predicted_struct(self, label=None):
rv = self.predicted_struct.clone()
if label:
rv.label = label
return rv
def meet(self, actual_struct, actual_ast):
try:
merge(self.predicted_struct, actual_struct)
except MergeException:
raise UnexpectedExpression(self.predicted_struct, actual_ast, actual_struct)
else:
return True
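# A minimal usage sketch of Context (illustrative only): predict that the
# visited expression must be a dictionary with a "field" key, the way
# visit_getattr below does for ``data.field``:
#
#     ctx = Context(return_struct_cls=Scalar,
#                   predicted_struct=Dictionary({'field': Scalar()}))
#     ctx.get_predicted_struct(label='data')  # labelled clone of the prediction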
expr_visitors = {}
def visits_expr(node_cls):
"""Decorator that registers a function as a visitor for ``node_cls``.
:param node_cls: subclass of :class:`jinja2.nodes.Expr`
"""
def decorator(func):
expr_visitors[node_cls] = func
@functools.wraps(func)
def wrapped_func(ast, ctx, macroses=None, config=default_config):
assert isinstance(ast, node_cls)
return func(ast, ctx, macroses=macroses, config=config)
return wrapped_func
return decorator
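# Note: the decorator registers the *unwrapped* function for dispatch via
# visit_expr below; the returned wrapper merely adds an isinstance assertion
# for direct calls such as ``visit_name(ast, ctx)``.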
def visit_expr(ast, ctx, macroses=None, config=default_config):
"""Returns a structure of ``ast``.
:param ctx: :class:`Context`
:param ast: instance of :class:`jinja2.nodes.Expr`
:returns: a tuple where the first element is an expression type (instance of :class:`Variable`)
and the second element is an expression structure (instance of :class:`.model.Dictionary`)
"""
visitor = expr_visitors.get(type(ast))
if not visitor:
for node_cls, visitor_ in _compat.iteritems(expr_visitors):
if isinstance(ast, node_cls):
visitor = visitor_
if not visitor:
raise Exception('expression visitor for {0} is not found'.format(type(ast)))
return visitor(ast, ctx, macroses, config=config)
def _visit_dict(ast, ctx, macroses, items, config=default_config):
"""A common logic behind nodes.Dict and nodes.Call (``{{ dict(a=1) }}``)
visitors.
:param items: a list of (key, value); key may be either AST node or string
"""
ctx.meet(Dictionary(), ast)
rtype = Dictionary.from_ast(ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
struct = Dictionary()
for key, value in items:
value_rtype, value_struct = visit_expr(value, Context(
predicted_struct=Unknown.from_ast(value, order_nr=config.ORDER_OBJECT.get_next())), macroses, config=config)
struct = merge(struct, value_struct)
if isinstance(key, nodes.Node):
key_rtype, key_struct = visit_expr(key, Context(
predicted_struct=Scalar.from_ast(key, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, key_struct)
if isinstance(key, nodes.Const):
rtype[key.value] = value_rtype
elif isinstance(key, _compat.string_types):
rtype[key] = value_rtype
return rtype, struct
@visits_expr(nodes.BinExpr)
def visit_bin_expr(ast, ctx, macroses=None, config=default_config):
l_rtype, l_struct = visit_expr(ast.left, ctx, macroses, config=config)
r_rtype, r_struct = visit_expr(ast.right, ctx, macroses, config=config)
rv = merge_bool_expr_structs(l_struct, r_struct)
return merge_rtypes(l_rtype, r_rtype, operator=ast.operator), rv
@visits_expr(nodes.UnaryExpr)
def visit_unary_expr(ast, ctx, macroses=None, config=default_config):
return visit_expr(ast.node, ctx, macroses, config=config)
@visits_expr(nodes.Compare)
def visit_compare(ast, ctx, macroses=None, config=default_config):
ctx.meet(Boolean(), ast)
rtype, struct = visit_expr(ast.expr, Context(
predicted_struct=Unknown.from_ast(ast.expr, order_nr=config.ORDER_OBJECT.get_next())), macroses, config=config)
for op in ast.ops:
op_rtype, op_struct = visit_expr(op.expr, Context(
predicted_struct=Unknown.from_ast(ast.expr, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, op_struct)
return Boolean.from_ast(ast, order_nr=config.ORDER_OBJECT.get_next()), struct
@visits_expr(nodes.Slice)
def visit_slice(ast, ctx, macroses=None, config=default_config):
    # collect the slice components that are present, using a local name that
    # does not shadow the imported jinja2 ``nodes`` module
    slice_nodes = [n for n in [ast.start, ast.stop, ast.step] if n is not None]
    struct = visit_many(slice_nodes, macroses, config,
                        predicted_struct_cls=Number,
                        return_struct_cls=Number)
return Unknown(), struct
@visits_expr(nodes.Name)
def visit_name(ast, ctx, macroses=None, config=default_config):
kwargs = {
'order_nr': config.ORDER_OBJECT.get_next()
}
return ctx.return_struct_cls.from_ast(ast, **kwargs), Dictionary({
ast.name: ctx.get_predicted_struct(label=ast.name)
})
@visits_expr(nodes.Getattr)
def visit_getattr(ast, ctx, macroses=None, config=default_config):
context = Context(
ctx=ctx,
predicted_struct=Dictionary.from_ast(ast, {
ast.attr: ctx.get_predicted_struct(label=ast.attr),
}, order_nr=config.ORDER_OBJECT.get_next()))
return visit_expr(ast.node, context, macroses, config=config)
@visits_expr(nodes.Getitem)
def visit_getitem(ast, ctx, macroses=None, config=default_config):
arg = ast.arg
if isinstance(arg, nodes.Const):
if isinstance(arg.value, int):
if config.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE == 'list':
predicted_struct = List.from_ast(ast, ctx.get_predicted_struct(),
order_nr=config.ORDER_OBJECT.get_next())
elif config.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE == 'dictionary':
predicted_struct = Dictionary.from_ast(ast, {
arg.value: ctx.get_predicted_struct(),
}, order_nr=config.ORDER_OBJECT.get_next())
elif config.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE == 'tuple':
items = [Unknown() for i in range(arg.value + 1)]
items[arg.value] = ctx.get_predicted_struct()
predicted_struct = Tuple.from_ast(ast, tuple(items), may_be_extended=True,
order_nr=config.ORDER_OBJECT.get_next())
elif isinstance(arg.value, _compat.string_types):
predicted_struct = Dictionary.from_ast(ast, {
arg.value: ctx.get_predicted_struct(label=arg.value),
}, order_nr=config.ORDER_OBJECT.get_next())
else:
raise InvalidExpression(arg, '{0} is not supported as an index for a list or'
' a key for a dictionary'.format(arg.value))
elif isinstance(arg, nodes.Slice):
predicted_struct = List.from_ast(ast, ctx.get_predicted_struct(), order_nr=config.ORDER_OBJECT.get_next())
else:
if config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE == 'list':
predicted_struct = List.from_ast(ast, ctx.get_predicted_struct(), order_nr=config.ORDER_OBJECT.get_next())
elif config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE == 'dictionary':
predicted_struct = Dictionary.from_ast(ast, order_nr=config.ORDER_OBJECT.get_next())
_, arg_struct = visit_expr(arg,
Context(predicted_struct=Scalar.from_ast(arg, order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
rtype, struct = visit_expr(ast.node, Context(
ctx=ctx,
predicted_struct=predicted_struct), macroses, config=config)
return rtype, merge(struct, arg_struct)
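# Illustrative summary of the branches above: for ``{{ xs[0] }}`` the predicted
# structure of ``xs`` is chosen by config.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE
# (a List, a Dictionary keyed by 0, or a Tuple), string keys always predict a
# Dictionary, slices predict a List, and variable indices fall back to
# config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE.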
@visits_expr(nodes.Test)
def visit_test(ast, ctx, macroses=None, config=default_config):
ctx.meet(Boolean(), ast)
if ast.name in ('divisibleby', 'escaped', 'even', 'lower', 'odd', 'upper'):
# TODO
predicted_struct = Scalar.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
elif ast.name in ('defined', 'undefined', 'equalto', 'iterable', 'mapping',
'none', 'number', 'sameas', 'sequence', 'string'):
predicted_struct = Unknown.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
if ast.name == 'defined':
predicted_struct.checked_as_defined = True
elif ast.name == 'undefined':
predicted_struct.checked_as_undefined = True
else:
raise InvalidExpression(ast, 'unknown test "{0}"'.format(ast.name))
rtype, struct = visit_expr(ast.node, Context(return_struct_cls=Boolean,
predicted_struct=predicted_struct), macroses, config=config)
if ast.name == 'divisibleby':
if not ast.args:
raise InvalidExpression(ast, 'divisibleby must have an argument')
_, arg_struct = visit_expr(ast.args[0],
Context(predicted_struct=Number.from_ast(ast.args[0],
order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
struct = merge(arg_struct, struct)
return rtype, struct
@visits_expr(nodes.Concat)
def visit_concat(ast, ctx, macroses=None, config=default_config):
ctx.meet(Scalar(), ast)
return String.from_ast(ast, order_nr=config.ORDER_OBJECT.get_next()), visit_many(ast.nodes, macroses, config,
predicted_struct_cls=String)
@visits_expr(nodes.CondExpr)
def visit_cond_expr(ast, ctx, macroses=None, config=default_config):
if config.BOOLEAN_CONDITIONS:
test_predicted_struct = Boolean.from_ast(ast.test, order_nr=config.ORDER_OBJECT.get_next())
else:
test_predicted_struct = Unknown.from_ast(ast.test, order_nr=config.ORDER_OBJECT.get_next())
test_rtype, test_struct = visit_expr(ast.test, Context(predicted_struct=test_predicted_struct), macroses,
config=config)
if_rtype, if_struct = visit_expr(ast.expr1, ctx, macroses, config=config)
else_rtype, else_struct = visit_expr(ast.expr2, ctx, macroses, config=config)
struct = merge_many(if_struct, test_struct, else_struct)
rtype = merge_rtypes(if_rtype, else_rtype)
for var_name, var_struct in test_struct.iteritems():
if var_struct.checked_as_defined or var_struct.checked_as_undefined:
if var_struct.checked_as_undefined:
lookup_struct = if_struct
elif var_struct.checked_as_defined:
lookup_struct = else_struct
struct[var_name].may_be_defined = (lookup_struct and
var_name in lookup_struct and
lookup_struct[var_name].constant)
struct[var_name].checked_as_defined = test_struct[var_name].checked_as_defined and (
                not lookup_struct or var_name not in lookup_struct or lookup_struct[var_name].constant
)
struct[var_name].checked_as_undefined = test_struct[var_name].checked_as_undefined and (
                not lookup_struct or var_name not in lookup_struct or lookup_struct[var_name].constant
)
return rtype, struct
@visits_expr(nodes.Call)
def visit_call(ast, ctx, macroses=None, config=default_config):
if isinstance(ast.node, nodes.Name):
if macroses and ast.node.name in macroses:
macro = macroses[ast.node.name]
call = MacroCall(macro, ast.args, ast.kwargs, config=config)
args_struct = call.match_passed_args_to_expected_args()
if call.passed_args:
args_struct = merge(args_struct, call.match_passed_args_to_expected_kwargs())
if call.passed_kwargs:
args_struct = merge(args_struct, call.match_passed_kwargs_to_expected_args())
if call.passed_kwargs:
args_struct = merge(args_struct, call.match_passed_kwargs_to_expected_kwargs())
if call.passed_args or call.expected_args:
raise InvalidExpression(ast, ('incorrect usage of "{0}". it takes '
'exactly {1} positional arguments'.format(macro.name, len(macro.args))))
if call.passed_kwargs:
first_unknown_kwarg = next(_compat.iterkeys(call.passed_kwargs))
raise InvalidExpression(ast, ('incorrect usage of "{0}". unknown keyword argument '
'"{1}" is passed'.format(macro.name, first_unknown_kwarg)))
return Unknown(), args_struct
elif ast.node.name == 'range':
ctx.meet(List(Unknown()), ast)
struct = Dictionary()
for arg in ast.args:
arg_rtype, arg_struct = visit_expr(arg, Context(
predicted_struct=Number.from_ast(arg, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, arg_struct)
return List(Number()), struct
elif ast.node.name == 'lipsum':
ctx.meet(Scalar(), ast)
struct = Dictionary()
# probable TODO: set possible types for args and kwargs
for arg in ast.args:
arg_rtype, arg_struct = visit_expr(arg, Context(
predicted_struct=Scalar.from_ast(arg, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, arg_struct)
for kwarg in ast.kwargs:
arg_rtype, arg_struct = visit_expr(kwarg.value, Context(
predicted_struct=Scalar.from_ast(kwarg, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, arg_struct)
return String(), struct
elif ast.node.name == 'dict':
ctx.meet(Dictionary(), ast)
if ast.args:
raise InvalidExpression(ast, 'dict accepts only keyword arguments')
return _visit_dict(ast, ctx, macroses, [(kwarg.key, kwarg.value) for kwarg in ast.kwargs], config=config)
else:
raise InvalidExpression(ast, '"{0}" call is not supported'.format(ast.node.name))
elif isinstance(ast.node, nodes.Getattr):
if ast.node.attr in ('keys', 'iterkeys', 'values', 'itervalues'):
ctx.meet(List(Unknown()), ast)
rtype, struct = visit_expr(
ast.node.node, Context(
predicted_struct=Dictionary.from_ast(ast.node.node, order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
return List(Unknown()), struct
if ast.node.attr in ('startswith', 'endswith'):
ctx.meet(Boolean(), ast)
rtype, struct = visit_expr(
ast.node.node,
Context(predicted_struct=String.from_ast(ast.node.node, order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
return Boolean(), struct
if ast.node.attr == 'split':
ctx.meet(List(String()), ast)
rtype, struct = visit_expr(
ast.node.node,
Context(predicted_struct=String.from_ast(ast.node.node, order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
if ast.args:
arg = ast.args[0]
_, arg_struct = visit_expr(arg, Context(
predicted_struct=String.from_ast(arg, order_nr=config.ORDER_OBJECT.get_next())), macroses,
config=config)
struct = merge(struct, arg_struct)
return List(String()), struct
raise InvalidExpression(ast, '"{0}" call is not supported'.format(ast.node.attr))
@visits_expr(nodes.Filter)
def visit_filter(ast, ctx, macroses=None, config=default_config):
return_struct_cls = None
if ast.name in ('abs', 'striptags', 'capitalize', 'center', 'escape', 'filesizeformat',
'float', 'forceescape', 'format', 'indent', 'int', 'replace', 'round',
'safe', 'string', 'striptags', 'title', 'trim', 'truncate', 'upper',
'urlencode', 'urlize', 'wordcount', 'wordwrap', 'e'):
ctx.meet(Scalar(), ast)
if ast.name in ('abs', 'round'):
node_struct = Number.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = Number
elif ast.name in ('float', 'int'):
node_struct = Scalar.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = Number
elif ast.name in ('striptags', 'capitalize', 'center', 'escape', 'forceescape', 'format', 'indent',
'replace', 'safe', 'title', 'trim', 'truncate', 'upper', 'urlencode',
'urlize', 'wordwrap', 'e'):
node_struct = String.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = String
elif ast.name == 'filesizeformat':
node_struct = Number.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = String
elif ast.name == 'string':
node_struct = Scalar.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = String
elif ast.name == 'wordcount':
node_struct = String.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
return_struct_cls = Number
else:
node_struct = Scalar.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
elif ast.name in ('batch', 'slice'):
ctx.meet(List(List(Unknown())), ast)
rtype = List(List(Unknown(), linenos=[ast.node.lineno]), linenos=[ast.node.lineno])
node_struct = merge(rtype, ctx.get_predicted_struct()).item
_, struct = visit_expr(ast.node, Context(
ctx=ctx,
return_struct_cls=return_struct_cls,
predicted_struct=node_struct
), macroses, config=config)
return rtype, struct
elif ast.name == 'default':
default_value_rtype, default_value_struct = visit_expr(
ast.args[0],
Context(predicted_struct=Unknown.from_ast(ast.args[0], order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
node_struct = merge(
ctx.get_predicted_struct(),
default_value_rtype,
)
node_struct.used_with_default = True
node_struct.value = default_value_rtype.value
elif ast.name == 'dictsort':
ctx.meet(List(Tuple([Scalar(), Unknown()])), ast)
node_struct = Dictionary.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
elif ast.name == 'join':
ctx.meet(Scalar(), ast)
node_struct = List.from_ast(ast.node, String(), order_nr=config.ORDER_OBJECT.get_next())
rtype, struct = visit_expr(ast.node, Context(
return_struct_cls=String,
predicted_struct=node_struct
), macroses, config=config)
arg_rtype, arg_struct = visit_expr(ast.args[0],
Context(predicted_struct=String.from_ast(ast.args[0],
order_nr=config.ORDER_OBJECT.get_next())),
macroses, config=config)
return rtype, merge(struct, arg_struct)
elif ast.name in ('first', 'last', 'random', 'length', 'sum'):
if ast.name in ('first', 'last', 'random'):
el_struct = ctx.get_predicted_struct()
elif ast.name == 'length':
ctx.meet(Scalar(), ast)
return_struct_cls = Number
el_struct = Unknown()
else:
ctx.meet(Scalar(), ast)
el_struct = Scalar()
node_struct = List.from_ast(ast.node, el_struct, order_nr=config.ORDER_OBJECT.get_next())
elif ast.name in ('groupby', 'map', 'reject', 'rejectattr', 'select', 'selectattr', 'sort'):
ctx.meet(List(Unknown()), ast)
node_struct = merge(
List(Unknown()),
ctx.get_predicted_struct()
)
elif ast.name == 'list':
ctx.meet(List(Scalar()), ast)
node_struct = merge(
List(Scalar.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())),
ctx.get_predicted_struct()
).item
elif ast.name == 'pprint':
ctx.meet(Scalar(), ast)
node_struct = ctx.get_predicted_struct()
elif ast.name == 'xmlattr':
ctx.meet(Scalar(), ast)
node_struct = Dictionary.from_ast(ast.node, order_nr=config.ORDER_OBJECT.get_next())
elif ast.name == 'attr':
raise InvalidExpression(ast, 'attr filter is not supported')
else:
raise InvalidExpression(ast, 'unknown filter')
rv = visit_expr(ast.node, Context(
ctx=ctx,
return_struct_cls=return_struct_cls,
predicted_struct=node_struct
), macroses, config=config)
return rv
# :class:`nodes.Literal` visitors
@visits_expr(nodes.TemplateData)
def visit_template_data(ast, ctx, macroses=None, config=default_config):
return Scalar(), Dictionary()
@visits_expr(nodes.Const)
def visit_const(ast, ctx, macroses=None, config=default_config):
ctx.meet(Scalar(), ast)
if isinstance(ast.value, _compat.string_types):
rtype = String.from_ast(ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
elif isinstance(ast.value, bool):
rtype = Boolean.from_ast(ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
elif isinstance(ast.value, (int, float, complex)):
rtype = Number.from_ast(ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
else:
rtype = Scalar.from_ast(ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
return rtype, Dictionary()
@visits_expr(nodes.Tuple)
def visit_tuple(ast, ctx, macroses=None, config=default_config):
ctx.meet(Tuple(None), ast)
struct = Dictionary()
item_structs = []
for item in ast.items:
item_rtype, item_struct = visit_expr(item, ctx, macroses, config=config)
item_structs.append(item_rtype)
struct = merge(struct, item_struct)
rtype = Tuple.from_ast(ast, item_structs, constant=True, order_nr=config.ORDER_OBJECT.get_next())
return rtype, struct
@visits_expr(nodes.List)
def visit_list(ast, ctx, macroses=None, config=default_config):
ctx.meet(List(Unknown()), ast)
struct = Dictionary()
predicted_struct = merge(List(Unknown()), ctx.get_predicted_struct()).item
el_rtype = None
for item in ast.items:
item_rtype, item_struct = visit_expr(item, Context(predicted_struct=predicted_struct), macroses, config=config)
struct = merge(struct, item_struct)
if el_rtype is None:
el_rtype = item_rtype
else:
el_rtype = merge_rtypes(el_rtype, item_rtype)
rtype = List.from_ast(ast, el_rtype or Unknown(), constant=True, order_nr=config.ORDER_OBJECT.get_next())
return rtype, struct
@visits_expr(nodes.Dict)
def visit_dict(ast, ctx, macroses=None, config=default_config):
ctx.meet(Dictionary(), ast)
return _visit_dict(ast, ctx, macroses, [(item.key, item.value) for item in ast.items], config=config)
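# Imported at the end of the module, presumably to avoid a circular import
# between this module and ..macro (MacroCall is only resolved at call time
# inside visit_call above).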
from ..macro import MacroCall
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dateutil import tz
from datetime import datetime, timedelta
import unittest
from c7n import filters as base_filters
from c7n.resources.ec2 import filters
from c7n.utils import annotation
from .common import instance, event_data, Bag
class BaseFilterTest(unittest.TestCase):
def assertFilter(self, f, i, v):
"""
f: filter data/spec
i: instance
v: expected value (true/false)
"""
try:
self.assertEqual(filters.factory(f)(i), v)
except AssertionError:
            print(f, i['LaunchTime'], i['Tags'], v)
raise
class TestFilter(unittest.TestCase):
def test_filter_construction(self):
self.assertTrue(
isinstance(
filters.factory({'tag:ASV': 'absent'}),
base_filters.ValueFilter))
def test_filter_validation(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory, {'type': 'ax', 'xyz': 1})
class TestOrFilter(unittest.TestCase):
def test_or(self):
f = filters.factory({
'or': [
{'Architecture': 'x86_64'},
{'Architecture': 'armv8'}]})
results = [instance(Architecture='x86_64')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([instance(Architecture='amd64')]),
[])
class TestAndFilter(unittest.TestCase):
def test_and(self):
f = filters.factory({
'and': [
{'Architecture': 'x86_64'},
{'Color': 'green'}]})
results = [instance(Architecture='x86_64', Color='green')]
self.assertEqual(
f.process(results),
results)
self.assertEqual(
f.process([
instance(
Architecture='x86_64',
Color='blue')]),
[])
self.assertEqual(
f.process([
instance(
Architecture='x86_64')]),
[])
class TestGlobValue(unittest.TestCase):
    def test_glob_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '*green*',
'op': 'glob'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='mighty green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
class TestRegexValue(unittest.TestCase):
def test_regex_validate(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory,
{'type': 'value',
'key': 'Color',
'value': '*green',
'op': 'regex'})
def test_regex_match(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': '.*green.*',
'op': 'regex'})
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='green papaya')),
True)
self.assertEqual(
f(instance(
Architecture='x86_64',
Color='blue')),
False)
self.assertEqual(
f(instance(
Architecture='x86_64')),
False)
class TestValueTypes(BaseFilterTest):
def test_normalize(self):
fdata = {
'type': 'value',
'key': 'tag:Name',
'value_type': 'normalize',
'value': 'compilelambda'
}
self.assertFilter(fdata, instance(), True)
def test_size(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'size',
'value': 2
}
self.assertFilter(fdata, instance(), True)
def test_integer(self):
fdata = {
'type': 'value',
'key': 'tag:Count',
'op': 'greater-than',
'value_type': 'integer',
'value': 0}
def i(d):
return instance(Tags=[{"Key": "Count", "Value": d}])
self.assertFilter(fdata, i('42'), True)
self.assertFilter(fdata, i('abc'), False)
fdata['op'] = 'equal'
self.assertFilter(fdata, i('abc'), True)
def test_swap(self):
fdata = {
'type': 'value',
'key': 'SecurityGroups[].GroupId',
'value_type': 'swap',
'op': 'in',
'value': 'sg-47b76f22'
}
self.assertFilter(fdata, instance(), True)
def test_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'age',
'value': 32}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), False)
self.assertFilter(fdata, i(one_month), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
def test_expiration(self):
now = datetime.now(tz=tz.tzutc())
three_months = now + timedelta(90)
two_months = now + timedelta(60)
def i(d):
return instance(LaunchTime=d)
fdata = {
'type': 'value',
'key': 'LaunchTime',
'op': 'less-than',
'value_type': 'expiration',
'value': 61}
self.assertFilter(fdata, i(three_months), False)
self.assertFilter(fdata, i(two_months), True)
self.assertFilter(fdata, i(now), True)
self.assertFilter(fdata, i(now.isoformat()), True)
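# Taken together, test_age and test_expiration pin down the date value_types:
# "age" measures days elapsed since the attribute's timestamp, "expiration"
# measures days until it, and `op` is applied as ``computed_days <op> value``.
# An illustrative helper (not part of c7n) for the "age" direction:
def _days_since(when, now=None):
    now = now or datetime.now(tz=tz.tzutc())
    return (now - when).total_seconds() / 86400.0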
class TestInstanceAge(BaseFilterTest):
def test_filter_instance_age(self):
now = datetime.now(tz=tz.tzutc())
three_months = now - timedelta(90)
two_months = now - timedelta(60)
one_month = now - timedelta(30)
def i(d):
return instance(LaunchTime=d)
for ii, v in [
(i(now), False),
(i(three_months), True),
(i(two_months), True),
(i(one_month), False)
]:
self.assertFilter({'type': 'instance-uptime'}, ii, v)
class TestMarkedForAction(BaseFilterTest):
def test_marked_for_op_with_skew(self):
now = datetime.now()
yesterday = datetime.now() - timedelta(7)
next_week = now + timedelta(7)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for inst, skew, expected in [
(i(next_week), 7, True),
(i(next_week), 3, False),
(i(now), 0, True),
(i(now), 5, True),
(i(yesterday), 5, True),
(i(now+timedelta(1)), 1, True),
(i(now+timedelta(2)), 1, False),
(i(now+timedelta(3)), 1, False)
]:
self.assertFilter(
{'type': 'marked-for-op', 'skew': skew}, inst, expected)
def test_filter_action_date(self):
now = datetime.now()
yesterday = now - timedelta(1)
tomorrow = now + timedelta(1)
def i(d, action='stop'):
return instance(Tags=[
{"Key": "maid_status",
"Value": "not compliant: %s@%s" % (
action, d.strftime("%Y/%m/%d"))}])
for ii, v in [
(i(yesterday), True),
(i(now), True),
(i(tomorrow), False),
(i(yesterday, 'terminate'), False)
]:
self.assertFilter({'type': 'marked-for-op'}, ii, v)
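# The skew cases above imply the matching rule: an instance is due for its
# marked op once the tagged action date is on or before now + skew days, with
# skew defaulting to 0. A hedged sketch of that predicate (not c7n's code):
def _marked_due(action_date, skew=0, now=None):
    now = now or datetime.now()
    return action_date <= now + timedelta(skew)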
class EventFilterTest(BaseFilterTest):
def test_event_filter(self):
b = Bag(data={'mode': []})
event = event_data('event-instance-state.json')
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
self.assertTrue(filters.factory(f, b).process(
[instance()], event))
def test_event_no_mode(self):
b = Bag(data={'resource': 'something'})
f = {'type': 'event',
'key': 'detail.state',
'value': 'pending'}
self.assertRaises(
base_filters.FilterValidationError, filters.factory, f, b)
class TestInstanceValue(BaseFilterTest):
def test_filter_tag_count(self):
tags = []
for i in range(10):
tags.append({'Key': str(i), 'Value': str(i)})
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'lt'}, i, False)
tags.pop(0)
i = instance(Tags=tags)
self.assertFilter(
{'type': 'tag-count', 'op': 'gte', 'count': 9}, i, True)
def test_filter_tag(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'def'}, i, False)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ())
i = instance(Tags=[
{'Key': 'CMDB', 'Value': 'abcd'}])
self.assertFilter(
{'tag:ASV': 'absent'}, i, True)
self.assertEqual(
annotation(i, base_filters.ANNOTATION_KEY), ['tag:ASV'])
def test_present(self):
i = instance(Tags=[
{'Key': 'ASV', 'Value': ''}])
self.assertFilter(
{'type': 'value',
'key': 'tag:ASV',
'value': 'present'},
i, True)
def test_jmespath(self):
self.assertFilter(
{'Placement.AvailabilityZone': 'us-west-2c'},
instance(),
True)
self.assertFilter(
{'Placement.AvailabilityZone': 'us-east-1c'},
instance(),
False)
def test_complex_validator(self):
self.assertRaises(
base_filters.FilterValidationError,
filters.factory,
{"key": "xyz",
"type": "value"})
self.assertRaises(
base_filters.FilterValidationError,
filters.factory,
{"value": "xyz",
"type": "value"})
self.assertRaises(
base_filters.FilterValidationError,
filters.factory,
{"key": "xyz",
"value": "xyz",
"op": "oo",
"type": "value"})
def test_complex_value_filter(self):
self.assertFilter(
{"key": (
"length(BlockDeviceMappings"
"[?Ebs.DeleteOnTermination == `true`]"
".Ebs.DeleteOnTermination)"),
"value": 0,
"type": "value",
"op": "gt"},
instance(),
True)
def test_not_null_filter(self):
self.assertFilter(
{"key": "Hypervisor",
"value": "not-null",
"type": "value"},
instance(),
True)
class TestEqualValue(unittest.TestCase):
def test_eq(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'eq'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
def test_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'equal'})
self.assertEqual(
f(instance(Color='green')),
True)
self.assertEqual(
f(instance(Color='blue')),
False)
class TestNotEqualValue(unittest.TestCase):
def test_ne(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'ne'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
def test_not_equal(self):
f = filters.factory(
{'type': 'value',
'key': 'Color',
'value': 'green',
'op': 'not-equal'})
self.assertEqual(
f(instance(Color='green')),
False)
self.assertEqual(
f(instance(Color='blue')),
True)
class TestGreaterThanValue(unittest.TestCase):
def test_gt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'gt'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_greater_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'greater-than'})
self.assertEqual(
f(instance(Number=11)),
True)
self.assertEqual(
f(instance(Number=9)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestLessThanValue(unittest.TestCase):
def test_lt(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'lt'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
def test_less_than(self):
f = filters.factory(
{'type': 'value',
'key': 'Number',
'value': 10,
'op': 'less-than'})
self.assertEqual(
f(instance(Number=9)),
True)
self.assertEqual(
f(instance(Number=11)),
False)
self.assertEqual(
f(instance(Number=10)),
False)
class TestInList(unittest.TestCase):
def test_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'in'})
self.assertEqual(
f(instance(Thing='Foo')),
True)
self.assertEqual(
f(instance(Thing='Baz')),
False)
class TestNotInList(unittest.TestCase):
def test_ni(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'ni'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
def test_not_in(self):
f = filters.factory(
{'type': 'value',
'key': 'Thing',
'value': ['Foo', 'Bar', 'Quux'],
'op': 'not-in'})
self.assertEqual(
f(instance(Thing='Baz')),
True)
self.assertEqual(
f(instance(Thing='Foo')),
False)
if __name__ == '__main__':
unittest.main()
|
|
# Threshold functions
import os
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
from skimage.feature import greycomatrix, greycoprops
from scipy.ndimage import generic_filter
from plantcv.plantcv._debug import _debug
# Binary threshold
def binary(gray_img, threshold, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the threshold value.
Inputs:
gray_img = Grayscale image data
threshold = Threshold value (0-255)
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param threshold: int
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, threshold, max_value, threshold_method, "_binary_threshold_")
return bin_img
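# Example usage (assuming an 8-bit grayscale image loaded elsewhere; the
# threshold value here is illustrative):
#
#     bin_img = binary(gray_img=gray, threshold=120, max_value=255,
#                      object_type="light")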
# Gaussian adaptive threshold
def gaussian(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the Gaussian adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, threshold_method,
"_gaussian_threshold_")
return bin_img
# Mean adaptive threshold
def mean(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the mean adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_MEAN_C, threshold_method,
"_mean_threshold_")
return bin_img
# Otsu autothreshold
def otsu(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image using Otsu's thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, 0, max_value, threshold_method, "_otsu_threshold_")
return bin_img
# Triangle autothreshold
def triangle(gray_img, max_value, object_type="light", xstep=1):
"""Creates a binary image from a grayscale image using Zack et al.'s (1977) thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
    xstep = value to move along the x-axis to determine the points from which to
            calculate distance (recommended to start at 1 and change if needed)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:param xstep: int
:return bin_img: numpy.ndarray
"""
# Calculate automatic threshold value based on triangle algorithm
hist = cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# Make histogram one array
newhist = []
for item in hist:
newhist.extend(item)
# Detect peaks
show = False
if params.debug == "plot":
show = True
ind = _detect_peaks(newhist, mph=None, mpd=1, show=show)
# Find point corresponding to highest peak
# Find intensity value (y) of highest peak
max_peak_int = max(list(newhist[i] for i in ind))
# Find value (x) of highest peak
max_peak = [i for i, x in enumerate(newhist) if x == max(newhist)]
# Combine x,y
max_peak_xy = [max_peak[0], max_peak_int]
# Find final point at end of long tail
end_x = len(newhist) - 1
end_y = newhist[end_x]
end_xy = [end_x, end_y]
# Define the known points
points = [max_peak_xy, end_xy]
x_coords, y_coords = zip(*points)
# Get threshold value
peaks = []
dists = []
for i in range(x_coords[0], x_coords[1], xstep):
distance = (((x_coords[1] - x_coords[0]) * (y_coords[0] - hist[i])) -
((x_coords[0] - i) * (y_coords[1] - y_coords[0]))) / math.sqrt(
(float(x_coords[1]) - float(x_coords[0])) *
(float(x_coords[1]) - float(x_coords[0])) +
((float(y_coords[1]) - float(y_coords[0])) *
(float(y_coords[1]) - float(y_coords[0]))))
peaks.append(i)
dists.append(distance)
autothresh = [peaks[x] for x in [i for i, x in enumerate(list(dists)) if x == max(list(dists))]]
autothreshval = autothresh[0]
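    # Note: the loop above evaluates, for each bin i between the peak and the
    # tail, the signed point-to-line distance from (i, hist[i]) to the line
    # through (x0, y0) = max_peak_xy and (x1, y1) = end_xy:
    #     d(i) = ((x1 - x0) * (y0 - hist[i]) - (x0 - i) * (y1 - y0)) / sqrt((x1 - x0)^2 + (y1 - y0)^2)
    # and the threshold is the bin that maximizes that distance.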
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, autothreshval, max_value, threshold_method, "_triangle_threshold_")
# Additional figures created by this method, if debug is on
if params.debug is not None:
if params.debug == 'print':
_, ax = plt.subplots()
ax.plot(hist)
ax.set(title='Threshold value = {t}'.format(t=autothreshval))
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
fig_name_hist = os.path.join(params.debug_outdir,
str(params.device) + '_triangle_thresh_hist_' + str(autothreshval) + ".png")
# write the figure to current directory
plt.savefig(fig_name_hist, dpi=params.dpi)
# close pyplot plotting window
plt.clf()
elif params.debug == 'plot':
print('Threshold value = {t}'.format(t=autothreshval))
_, ax = plt.subplots()
ax.plot(hist)
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
plt.show()
return bin_img
def texture(gray_img, ksize, threshold, offset=3, texture_method='dissimilarity', borders='nearest',
max_value=255):
"""Creates a binary image from a grayscale image using skimage texture calculation for thresholding.
This function is quite slow.
Inputs:
gray_img = Grayscale image data
ksize = Kernel size for texture measure calculation
threshold = Threshold value (0-255)
offset = Distance offsets
texture_method = Feature of a grey level co-occurrence matrix, either
'contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy',
                     or 'correlation'. For equations of the different features,
                     see scikit-image.
borders = How the array borders are handled, either 'reflect',
'constant', 'nearest', 'mirror', or 'wrap'
max_value = Value to apply above threshold (usually 255 = white)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param ksize: int
:param threshold: int
:param offset: int
:param texture_method: str
:param borders: str
:param max_value: int
:return bin_img: numpy.ndarray
"""
# Function that calculates the texture of a kernel
def calc_texture(inputs):
inputs = np.reshape(a=inputs, newshape=[ksize, ksize])
inputs = inputs.astype(np.uint8)
# Greycomatrix takes image, distance offset, angles (in radians), symmetric, and normed
# http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.greycomatrix
glcm = greycomatrix(inputs, [offset], [0], 256, symmetric=True, normed=True)
diss = greycoprops(glcm, texture_method)[0, 0]
return diss
# Make an array the same size as the original image
output = np.zeros(gray_img.shape, dtype=gray_img.dtype)
# Apply the texture function over the whole image
generic_filter(gray_img, calc_texture, size=ksize, output=output, mode=borders)
# Threshold so higher texture measurements stand out
bin_img = binary(gray_img=output, threshold=threshold, max_value=max_value, object_type='light')
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + "_texture_mask.png"))
return bin_img
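# Example usage (slow; assumes an 8-bit grayscale image, and the parameter
# values here are illustrative only):
#
#     tex_mask = texture(gray_img=gray, ksize=6, threshold=7, offset=3,
#                        texture_method='dissimilarity', borders='nearest')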
def custom_range(img, lower_thresh, upper_thresh, channel='gray'):
"""Creates a thresholded image and mask from an RGB image and threshold values.
Inputs:
img = RGB or grayscale image data
lower_thresh = List of lower threshold values (0-255)
upper_thresh = List of upper threshold values (0-255)
channel = Color-space channels of interest (RGB, HSV, LAB, or gray)
Returns:
mask = Mask, binary image
masked_img = Masked image, keeping the part of image of interest
:param img: numpy.ndarray
:param lower_thresh: list
:param upper_thresh: list
:param channel: str
:return mask: numpy.ndarray
:return masked_img: numpy.ndarray
"""
if channel.upper() == 'HSV':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to HSV colorspace
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Separate channels
hue = hsv_img[:, :, 0]
sat = hsv_img[:, :, 1]
value = hsv_img[:, :, 2]
# Make a mask for each channel
h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
s_mask = cv2.inRange(sat, lower_thresh[1], upper_thresh[1])
v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=h_mask)
result = cv2.bitwise_and(result, result, mask=s_mask)
masked_img = cv2.bitwise_and(result, result, mask=v_mask)
# Combine masks
mask = cv2.bitwise_and(s_mask, h_mask)
mask = cv2.bitwise_and(mask, v_mask)
elif channel.upper() == 'RGB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Separate channels (pcv.readimage reads RGB images in as BGR)
blue = img[:, :, 0]
green = img[:, :, 1]
red = img[:, :, 2]
# Make a mask for each channel
b_mask = cv2.inRange(blue, lower_thresh[2], upper_thresh[2])
g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
r_mask = cv2.inRange(red, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=b_mask)
result = cv2.bitwise_and(result, result, mask=g_mask)
masked_img = cv2.bitwise_and(result, result, mask=r_mask)
# Combine masks
mask = cv2.bitwise_and(b_mask, g_mask)
mask = cv2.bitwise_and(mask, r_mask)
elif channel.upper() == 'LAB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to LAB colorspace
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# Separate channels (pcv.readimage reads RGB images in as BGR)
lightness = lab_img[:, :, 0]
green_magenta = lab_img[:, :, 1]
blue_yellow = lab_img[:, :, 2]
# Make a mask for each channel
l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=l_mask)
result = cv2.bitwise_and(result, result, mask=gm_mask)
masked_img = cv2.bitwise_and(result, result, mask=by_mask)
# Combine masks
mask = cv2.bitwise_and(l_mask, gm_mask)
mask = cv2.bitwise_and(mask, by_mask)
elif channel.upper() in ('GRAY', 'GREY'):
# Check threshold input
if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
            fatal_error("If using a grayscale colorspace, 1 threshold is needed for both the " +
                        "lower_thresh and upper_thresh.")
if len(np.shape(img)) == 3:
# Convert RGB image to grayscale colorspace
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
# Make a mask
mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
masked_img = cv2.bitwise_and(img, img, mask=mask)
else:
        fatal_error(str(channel) + " is not a valid colorspace. Channel must be 'RGB', 'HSV', 'LAB', or 'gray'.")
    # Print or plot the masked image and mask if debug is on
_debug(visual=masked_img, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh.png'))
_debug(visual=mask, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh_mask.png'))
return mask, masked_img
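# A minimal, self-contained sketch (the synthetic BGR input is an assumption) of the
# HSV branch above, reduced to the underlying OpenCV calls: convert the colorspace,
# threshold each channel with cv2.inRange, then AND the per-channel masks together.
def _example_hsv_range():
    import cv2
    import numpy as np
    bgr = np.full((8, 8, 3), (30, 200, 90), dtype=np.uint8)  # one flat color
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    lower, upper = (0, 50, 50), (90, 255, 255)
    masks = [cv2.inRange(hsv[:, :, c], lower[c], upper[c]) for c in range(3)]
    mask = cv2.bitwise_and(cv2.bitwise_and(masks[0], masks[1]), masks[2])
    return mask, cv2.bitwise_and(bgr, bgr, mask=mask)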
# Internal method for calling the OpenCV threshold function to reduce code duplication
def _call_threshold(gray_img, threshold, max_value, threshold_method, method_name):
# Threshold the image
ret, bin_img = cv2.threshold(gray_img, threshold, max_value, threshold_method)
if bin_img.dtype != 'uint16':
bin_img = np.uint8(bin_img)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir,
str(params.device) + method_name + str(threshold) + '.png'))
return bin_img
# Internal method for calling the OpenCV adaptiveThreshold function to reduce code duplication
def _call_adaptive_threshold(gray_img, max_value, adaptive_method, threshold_method, method_name):
# Threshold the image
bin_img = cv2.adaptiveThreshold(gray_img, max_value, adaptive_method, threshold_method, 11, 2)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + method_name + '.png'))
return bin_img
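# Note that _call_adaptive_threshold hardcodes the neighborhood size (11) and the
# constant offset (2) passed to cv2.adaptiveThreshold. A standalone sketch (the
# synthetic gradient input is an assumption) with those parameters spelled out:
def _example_adaptive_threshold():
    import cv2
    import numpy as np
    gray = np.tile(np.arange(64, dtype=np.uint8) * 4, (64, 1))  # horizontal gradient
    # 11 = odd block size of the local neighborhood; 2 = constant subtracted from
    # the neighborhood's (Gaussian-weighted) mean before comparison
    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)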
# Internal method for detecting peaks for the triangle autothreshold method
def _detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=None):
"""Marcos Duarte, https://github.com/demotu/BMC; version 1.0.4; license MIT
Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
from detect_peaks import detect_peaks
x = np.random.randn(100)
x[60:81] = np.nan
# detect all peaks and plot data
ind = detect_peaks(x, show=True)
print(ind)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# set minimum peak height = 0 and minimum peak distance = 20
detect_peaks(x, mph=0, mpd=20, show=True)
x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
# set minimum peak distance = 2
detect_peaks(x, mpd=2, show=True)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# detection of valleys instead of peaks
detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
x = [0, 1, 1, 0, 1, 1, 0]
# detect both edges
detect_peaks(x, edge='both', show=True)
x = [-2, 1, -2, 2, 1, 1, 3, 0]
# set threshold = 2
detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
# It is always the case that x.size=256 since 256 hardcoded in line 186 ->
# cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# if x.size < 3:
# return np.array([], dtype=int)
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
# indnan = np.where(np.isnan(x))[0]
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.inf
# dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# x will never contain NaN since calcHist will never return NaN
# if ind.size and indnan.size:
# # NaN's and values close to NaN's cannot be peaks
# ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
# if ind.size and ind[0] == 0:
# ind = ind[1:]
# if ind.size and ind[-1] == x.size - 1:
# ind = ind[:-1]
# We think the above code will never be reached given some of the hardcoded properties used
# # Where this function is used has hardcoded mph=None so this will never be used
# # remove peaks < minimum peak height
# if ind.size and mph is not None:
# ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if show:
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.nan
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
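# A tiny self-contained check (illustration only) of the rising-edge rule retained
# above: a peak is an index where the first difference turns from positive to
# non-positive. It mirrors one of the docstring examples.
def _example_rising_edge_peaks():
    import numpy as np
    x = np.array([0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0], dtype='float64')
    dx = x[1:] - x[:-1]
    ind = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
    return ind  # -> array([1, 3, 5, 7, 9])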
# Internal plotting function for the triangle autothreshold method
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02 * x.size, x.size * 1.02 - 1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1 * yrange, ymax + 0.1 * yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
plt.show()
def saturation(rgb_img, threshold=255, channel="any"):
"""Return a mask filtering out saturated pixels.
Inputs:
rgb_img = RGB image
threshold = value for threshold, above which is considered saturated
channel = how many channels must be saturated for the pixel to be masked out ("any", "all")
Returns:
masked_img = A binary image with the saturated regions blacked out.
:param rgb_img: np.ndarray
:param threshold: int
:param channel: str
:return masked_img: np.ndarray
"""
# Mask red, green, and blue saturation separately
b, g, r = cv2.split(rgb_img)
b_saturated = cv2.inRange(b, threshold, 255)
g_saturated = cv2.inRange(g, threshold, 255)
r_saturated = cv2.inRange(r, threshold, 255)
# Combine channel masks
if channel.lower() == "any":
# Consider a pixel saturated if any channel is saturated
saturated = cv2.bitwise_or(b_saturated, g_saturated)
saturated = cv2.bitwise_or(saturated, r_saturated)
elif channel.lower() == "all":
# Consider a pixel saturated only if all channels are saturated
saturated = cv2.bitwise_and(b_saturated, g_saturated)
saturated = cv2.bitwise_and(saturated, r_saturated)
else:
fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")
# Invert "saturated" before returning, so saturated = black
bin_img = cv2.bitwise_not(saturated)
    _debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_saturation_threshold.png'))
return bin_img
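# A minimal standalone sketch (the synthetic input is an assumption) of the
# "any"-channel rule above: a pixel counts as saturated if any of B, G, R reaches
# the threshold, and the returned mask is inverted so saturated pixels are black.
def _example_saturation_any():
    import cv2
    import numpy as np
    rgb_img = np.zeros((4, 4, 3), dtype=np.uint8)
    rgb_img[0, 0] = (255, 10, 10)  # saturated in one channel only
    b, g, r = cv2.split(rgb_img)
    saturated = cv2.bitwise_or(cv2.bitwise_or(cv2.inRange(b, 255, 255),
                                              cv2.inRange(g, 255, 255)),
                               cv2.inRange(r, 255, 255))
    return cv2.bitwise_not(saturated)  # pixel (0, 0) is 0, everything else is 255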
def mask_bad(float_img, bad_type='native'):
    """Create a mask marking the desired "bad" pixels (nan and/or inf) of the input float image.
    Inputs:
    float_img = Image represented by an nd-array (data type: float). It is typically the result of
                some calculation based on the original image, so the data type is float and the
                array may contain "bad" values, i.e. nan and/or inf
    bad_type  = Definition of "bad": 'nan', 'inf', or 'native' (marks both)
Returns:
mask = A mask indicating the locations of "bad" pixels
:param float_img: numpy.ndarray
:param bad_type: str
:return mask: numpy.ndarray
"""
size_img = np.shape(float_img)
if len(size_img) != 2:
fatal_error('Input image is not a single channel image!')
mask = np.zeros(size_img, dtype='uint8')
idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)
    # If neither nan nor inf exists in the image, print a message; the mask stays all zeros
    if len(idx_nan) == 0 and len(idx_inf) == 0:
        print('Neither nan nor inf appears in the current image.')
    # At least one kind of "bad" value exists; 'native' marks both nan and inf
    elif bad_type.lower() == 'native':
        mask[idx_nan, idy_nan] = 255
        mask[idx_inf, idy_inf] = 255
    elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
        mask[idx_nan, idy_nan] = 255
    elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
        mask[idx_inf, idy_inf] = 255
    # "Bad" values exist, but not of the user-desired type; return the all-zero mask
    else:
        print('{} does not appear in the current image.'.format(bad_type.lower()))
_debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + "_bad_mask.png"))
return mask
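# A standalone sketch (illustration only) of the core of mask_bad: locate nan and
# inf entries in a float image and mark them 255 in a uint8 mask.
def _example_mask_bad():
    import numpy as np
    float_img = np.array([[0.0, np.nan], [np.inf, 1.0]])
    mask = np.zeros(float_img.shape, dtype='uint8')
    mask[np.isnan(float_img)] = 255  # 'nan' bad values
    mask[np.isinf(float_img)] = 255  # 'inf' bad values ('native' marks both)
    return mask  # -> [[0, 255], [255, 0]]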
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
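# An aside (not part of the test suite): the encoding checked above matches what
# np.unique with return_inverse=True produces, which is how this version of
# LabelEncoder implements fit_transform internally.
def _example_label_encoding_via_numpy():
    import numpy as np
    classes, encoded = np.unique([1, 1, 4, 5, -1, 0], return_inverse=True)
    assert list(classes) == [-1, 0, 1, 4, 5]
    assert list(encoded) == [2, 2, 3, 4, 0, 1]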
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, -1, 1])
msg = "contains previously unseen labels"
assert_raise_message(ValueError, msg, le.inverse_transform, [-2])
assert_raise_message(ValueError, msg, le.inverse_transform, [-2, -3, -4])
# Fail on inverse_transform("")
msg = "bad input shape ()"
assert_raise_message(ValueError, msg, le.inverse_transform, "")
def test_label_encoder_empty_array():
le = LabelEncoder()
le.fit(np.array(["1", "2", "1", "2", "2"]))
# test empty transform
transformed = le.transform([])
assert_array_equal(np.array([]), transformed)
# test empty inverse transform
inverse_transformed = le.inverse_transform([])
assert_array_equal(np.array([]), inverse_transformed)
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
Y = np.array([[1, 0], [0, 1]])
w = 'unknown class(es) [0, 4] will be ignored'
matrix = assert_warns_message(UserWarning, w,
mlb.fit(y).transform, [[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
Y = np.array([[1, 0, 0], [0, 1, 0]])
mlb = MultiLabelBinarizer(classes=[1, 2, 3])
matrix = assert_warns_message(UserWarning, w,
mlb.fit(y).transform, [[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
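# An aside (illustration only): the inverse calls above threshold at the midpoint
# (neg_label + pos_label) / 2 to decide class membership. A quick standalone check
# of that rule with neg_label=-2 and pos_label=2:
def _example_midpoint_threshold():
    import numpy as np
    binarized = np.array([[-2, 2], [2, -2]])
    threshold = (-2 + 2) / 2.0  # = 0.0
    membership = (binarized > threshold).astype(int)
    assert (membership == np.array([[0, 1], [1, 0]])).all()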
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))